VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@55909

Last change on this file since 55909 was 55863, checked in by vboxsync, 10 years ago

IPRT,SUPDrv,VMM: Revised the context switching hook interface. Do less work when enabling the hook (formerly 'registration'). Drop the reference counting (kept internally for solaris) as it complicates restrictions wrt destroying enabled hooks. Bumped support driver version.

1/* $Id: HMSVMR0.cpp 55863 2015-05-14 18:29:34Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24
25#include <VBox/vmm/pdmapi.h>
26#include <VBox/vmm/dbgf.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/tm.h>
30#include <VBox/vmm/gim.h>
31#include "HMInternal.h"
32#include <VBox/vmm/vm.h>
33#include "HMSVMR0.h"
34#include "dtrace/VBoxVMM.h"
35
36#ifdef DEBUG_ramshankar
37# define HMSVM_SYNC_FULL_GUEST_STATE
38# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
39# define HMSVM_ALWAYS_TRAP_PF
40# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47#ifdef VBOX_WITH_STATISTICS
48# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
49 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
50 if ((u64ExitCode) == SVM_EXIT_NPF) \
51 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
52 else \
53 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
54 } while (0)
55#else
56# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
57#endif
58
59/** If we decide to use a function table approach, this can be switched to a
60 * "static DECLCALLBACK(int)". */
61#define HMSVM_EXIT_DECL static int
62
63/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
64 *
65 * The CPU format of the segment attribute is described in X86DESCATTRBITS
66 * which is 16-bits (i.e. includes 4 bits of the segment limit).
67 *
68 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
69 * only the attribute bits and nothing else). The upper 4 bits are unused.
70 *
71 * @{ */
72#define HMSVM_CPU_2_VMCB_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
73#define HMSVM_VMCB_2_CPU_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
74/** @} */
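
/*
 * Illustrative sketch of the conversion round-trip (hypothetical value, not
 * used anywhere in this file): 0xcf9b is a typical CPU-format attribute for a
 * flat 32-bit code segment (G=1, D/B=1, limit[19:16]=0xf, P=1, DPL=0,
 * type=0xb). The 4 limit bits are dropped on the way to the VMCB because the
 * VMCB keeps the limit in u32Limit.
 */
#if 0
    uint16_t const uCpuAttr  = 0xcf9b;
    uint16_t const uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr);  /* 0x0c9b: compact 12-bit VMCB form. */
    uint16_t const uCpuAgain = HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr); /* 0xc09b: same attributes, limit bits now zero. */
#endif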
75
76/** @name Macros for loading, storing segment registers to/from the VMCB.
77 * @{ */
78#define HMSVM_LOAD_SEG_REG(REG, reg) \
79 do \
80 { \
81 Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
82 Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
83 pVmcb->guest.REG.u16Sel = pCtx->reg.Sel; \
84 pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
85 pVmcb->guest.REG.u64Base = pCtx->reg.u64Base; \
86 pVmcb->guest.REG.u16Attr = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
87 } while (0)
88
89#define HMSVM_SAVE_SEG_REG(REG, reg) \
90 do \
91 { \
92 pMixedCtx->reg.Sel = pVmcb->guest.REG.u16Sel; \
93 pMixedCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
94 pMixedCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \
95 pMixedCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
96 pMixedCtx->reg.u64Base = pVmcb->guest.REG.u64Base; \
97 pMixedCtx->reg.Attr.u = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
98 } while (0)
99/** @} */
100
101/** Macro for checking and returning from the calling function for
102 * \#VMEXIT intercepts that may be caused during delivery of another
103 * event in the guest. */
104#define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
105 do \
106 { \
107 int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
108 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT)) \
109 return VINF_SUCCESS; \
110 else if (RT_UNLIKELY(rc == VINF_EM_RESET)) \
111 return rc; \
112 } while (0)
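
/*
 * Illustrative sketch of how a \#VMEXIT handler is expected to use this macro
 * (hypothetical handler name and body; the real handlers further down in this
 * file follow the same shape):
 */
#if 0
HMSVM_EXIT_DECL hmR0SvmExitExampleSketch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();   /* May return early on double-fault or reset conditions. */
    /* ... handle the actual exit reason here ... */
    return VINF_SUCCESS;
}
#endif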
113
114/** Macro for upgrading @a a_rc to VINF_EM_DBG_STEPPED after emulating an
115 * instruction that exited. */
116#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
117 do { \
118 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
119 (a_rc) = VINF_EM_DBG_STEPPED; \
120 } while (0)
121
122/** Assert that preemption is disabled or covered by thread-context hooks. */
123#define HMSVM_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
124 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
125
126/** Assert that we haven't migrated CPUs when thread-context hooks are not
127 * used. */
128#define HMSVM_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
129 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
130 ("Illegal migration! Entered on CPU %u Current %u\n", \
131 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
132
133/** Exception bitmap mask for all contributory exceptions.
134 *
135 * Page fault is deliberately excluded here as it's conditional as to whether
136 * it's contributory or benign. Page faults are handled separately.
137 */
138#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
139 | RT_BIT(X86_XCPT_DE))
140
141/** @name VMCB Clean Bits.
142 *
143 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
144 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
145 * memory.
146 *
147 * @{ */
148/** All intercept vectors, TSC offset, PAUSE filter counter. */
149#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
150/** I/O permission bitmap, MSR permission bitmap. */
151#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
152/** ASID. */
153#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
154/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
155 * V_INTR_VECTOR. */
156#define HMSVM_VMCB_CLEAN_TPR RT_BIT(3)
157/** Nested Paging: Nested CR3 (nCR3), PAT. */
158#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
159/** Control registers (CR0, CR3, CR4, EFER). */
160#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
161/** Debug registers (DR6, DR7). */
162#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
163/** GDT, IDT limit and base. */
164#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
165/** Segment registers: CS, SS, DS, ES limit and base. */
166#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
167/** CR2.*/
168#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
169/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
170#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
171/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
172 * PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
173#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
174/** Mask of all valid VMCB Clean bits. */
175#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
176 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
177 | HMSVM_VMCB_CLEAN_ASID \
178 | HMSVM_VMCB_CLEAN_TPR \
179 | HMSVM_VMCB_CLEAN_NP \
180 | HMSVM_VMCB_CLEAN_CRX_EFER \
181 | HMSVM_VMCB_CLEAN_DRX \
182 | HMSVM_VMCB_CLEAN_DT \
183 | HMSVM_VMCB_CLEAN_SEG \
184 | HMSVM_VMCB_CLEAN_CR2 \
185 | HMSVM_VMCB_CLEAN_LBR \
186 | HMSVM_VMCB_CLEAN_AVIC)
187/** @} */
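
/*
 * Illustrative sketch (assuming a valid PSVMVMCB pointer named pVmcb): whenever
 * a field covered by one of these Clean bits is modified, the corresponding bit
 * must be cleared so the CPU re-reads that state from the VMCB on the next
 * VMRUN. This is the pattern used throughout this file.
 */
#if 0
    pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);                    /* Start intercepting CR3 reads... */
    pVmcb->ctrl.u64VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; /* ...so the cached intercept state is stale. */
#endif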
188
189/** @name SVM transient.
190 *
191 * A state structure for holding miscellaneous information across an AMD-V
192 * VMRUN/#VMEXIT operation, restored after the transition.
193 *
194 * @{ */
195typedef struct SVMTRANSIENT
196{
197 /** The host's rflags/eflags. */
198 RTCCUINTREG fEFlags;
199#if HC_ARCH_BITS == 32
200 uint32_t u32Alignment0;
201#endif
202
203 /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
204 uint64_t u64ExitCode;
205 /** The guest's TPR value used for TPR shadowing. */
206 uint8_t u8GuestTpr;
207 /** Alignment. */
208 uint8_t abAlignment0[7];
209
210 /** Whether the guest FPU state was active at the time of #VMEXIT. */
211 bool fWasGuestFPUStateActive;
212 /** Whether the guest debug state was active at the time of #VMEXIT. */
213 bool fWasGuestDebugStateActive;
214 /** Whether the hyper debug state was active at the time of #VMEXIT. */
215 bool fWasHyperDebugStateActive;
216 /** Whether the TSC offset mode needs to be updated. */
217 bool fUpdateTscOffsetting;
218 /** Whether the TSC_AUX MSR needs restoring on #VMEXIT. */
219 bool fRestoreTscAuxMsr;
220 /** Whether the #VMEXIT was caused by a page-fault during delivery of a
221 * contributory exception or a page-fault. */
222 bool fVectoringDoublePF;
223 /** Whether the #VMEXIT was caused by a page-fault during delivery of an
224 * external interrupt or NMI. */
225 bool fVectoringPF;
226} SVMTRANSIENT, *PSVMTRANSIENT;
227AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
228AssertCompileMemberAlignment(SVMTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
229/** @} */
230
231/**
232 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
233 */
234typedef enum SVMMSREXITREAD
235{
236 /** Reading this MSR causes a #VMEXIT. */
237 SVMMSREXIT_INTERCEPT_READ = 0xb,
238 /** Reading this MSR does not cause a #VMEXIT. */
239 SVMMSREXIT_PASSTHRU_READ
240} SVMMSREXITREAD;
241
242/**
243 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
244 */
245typedef enum SVMMSREXITWRITE
246{
247 /** Writing to this MSR causes a #VMEXIT. */
248 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
249 /** Writing to this MSR does not cause a #VMEXIT. */
250 SVMMSREXIT_PASSTHRU_WRITE
251} SVMMSREXITWRITE;
252
253/**
254 * SVM #VMEXIT handler.
255 *
256 * @returns VBox status code.
257 * @param pVCpu Pointer to the VMCPU.
258 * @param pCtx Pointer to the guest-CPU context.
259 * @param pSvmTransient Pointer to the SVM-transient structure.
260 */
261typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
262
263/*******************************************************************************
264* Internal Functions *
265*******************************************************************************/
266static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
267static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
268static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
269
270/** @name #VMEXIT handlers.
271 * @{
272 */
273static FNSVMEXITHANDLER hmR0SvmExitIntr;
274static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
275static FNSVMEXITHANDLER hmR0SvmExitInvd;
276static FNSVMEXITHANDLER hmR0SvmExitCpuid;
277static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
278static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
279static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
280static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
281static FNSVMEXITHANDLER hmR0SvmExitHlt;
282static FNSVMEXITHANDLER hmR0SvmExitMonitor;
283static FNSVMEXITHANDLER hmR0SvmExitMwait;
284static FNSVMEXITHANDLER hmR0SvmExitShutdown;
285static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
286static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
287static FNSVMEXITHANDLER hmR0SvmExitSetPendingXcptUD;
288static FNSVMEXITHANDLER hmR0SvmExitMsr;
289static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
290static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
291static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
292static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
293static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
294static FNSVMEXITHANDLER hmR0SvmExitVIntr;
295static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
296static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
297static FNSVMEXITHANDLER hmR0SvmExitIret;
298static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
299static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
300static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
301static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
302static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
303/** @} */
304
305DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
306
307/*******************************************************************************
308* Global Variables *
309*******************************************************************************/
310/** Ring-0 memory object for the IO bitmap. */
311RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
312/** Physical address of the IO bitmap. */
313RTHCPHYS g_HCPhysIOBitmap = 0;
314/** Virtual address of the IO bitmap. */
315R0PTRTYPE(void *) g_pvIOBitmap = NULL;
316
317
318/**
319 * Sets up and activates AMD-V on the current CPU.
320 *
321 * @returns VBox status code.
322 * @param pCpu Pointer to the CPU info struct.
323 * @param pVM Pointer to the VM (can be NULL after a resume!).
324 * @param pvCpuPage Pointer to the global CPU page.
325 * @param HCPhysCpuPage Physical address of the global CPU page.
326 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
327 * @param pvArg Unused on AMD-V.
328 */
329VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
330 void *pvArg)
331{
332 Assert(!fEnabledByHost);
333 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
334 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
335 Assert(pvCpuPage);
336 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
337
338 NOREF(pvArg);
339 NOREF(fEnabledByHost);
340
341 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
342 RTCCUINTREG fEFlags = ASMIntDisableFlags();
343
344 /*
345 * We must turn on AMD-V and set up the host state physical address, as those MSRs are per CPU.
346 */
347 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
348 if (u64HostEfer & MSR_K6_EFER_SVME)
349 {
350 /* If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
351 if ( pVM
352 && pVM->hm.s.svm.fIgnoreInUseError)
353 {
354 pCpu->fIgnoreAMDVInUseError = true;
355 }
356
357 if (!pCpu->fIgnoreAMDVInUseError)
358 {
359 ASMSetFlags(fEFlags);
360 return VERR_SVM_IN_USE;
361 }
362 }
363
364 /* Turn on AMD-V in the EFER MSR. */
365 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
366
367 /* Write the physical page address where the CPU will store the host state while executing the VM. */
368 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
369
370 /* Restore interrupts. */
371 ASMSetFlags(fEFlags);
372
373 /*
374 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
375 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
376 * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
377 * to flush the TLB before using a new ASID.
378 */
379 pCpu->fFlushAsidBeforeUse = true;
380
381 /*
382 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
383 */
384 ++pCpu->cTlbFlushes;
385
386 return VINF_SUCCESS;
387}
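
/*
 * Illustrative sketch (hypothetical helper; the actual capability probing is
 * done by the generic HM ring-0 init code before this function is reached):
 * CPUID leaf 0x80000001 reports SVM support in ECX bit 2.
 */
#if 0
static bool hmR0SvmIsSupportedSketch(void)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    ASMCpuId(0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
    return RT_BOOL(uEcx & RT_BIT(2));   /* CPUID Fn8000_0001 ECX[2] = SVM. */
}
#endif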
388
389
390/**
391 * Deactivates AMD-V on the current CPU.
392 *
393 * @returns VBox status code.
394 * @param pCpu Pointer to the CPU info struct.
395 * @param pvCpuPage Pointer to the global CPU page.
396 * @param HCPhysCpuPage Physical address of the global CPU page.
397 */
398VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
399{
400 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
401 AssertReturn( HCPhysCpuPage
402 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
403 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
404 NOREF(pCpu);
405
406 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
407 RTCCUINTREG fEFlags = ASMIntDisableFlags();
408
409 /* Turn off AMD-V in the EFER MSR. */
410 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
411 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
412
413 /* Invalidate host state physical address. */
414 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
415
416 /* Restore interrupts. */
417 ASMSetFlags(fEFlags);
418
419 return VINF_SUCCESS;
420}
421
422
423/**
424 * Does global AMD-V initialization (called during module initialization).
425 *
426 * @returns VBox status code.
427 */
428VMMR0DECL(int) SVMR0GlobalInit(void)
429{
430 /*
431 * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
432 * once globally here instead of per-VM.
433 */
434 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
435 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
436 if (RT_FAILURE(rc))
437 return rc;
438
439 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
440 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
441
442 /* Set all bits to intercept all IO accesses. */
443 ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
444 return VINF_SUCCESS;
445}
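
/*
 * Illustrative sketch (hypothetical helper, unused here since all ports are
 * intercepted): the IOPM holds one bit per I/O port, so port N lives at byte
 * N / 8, bit N % 8. Clearing a bit would let accesses to that port run
 * without causing an IOIO \#VMEXIT.
 */
#if 0
static void hmR0SvmSetIoPortPassthruSketch(uint16_t uPort)
{
    uint8_t *pbIoBitmap = (uint8_t *)g_pvIOBitmap;
    pbIoBitmap[uPort / 8] &= ~(uint8_t)(1 << (uPort % 8));
}
#endif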
446
447
448/**
449 * Does global AMD-V termination (called during module termination).
450 */
451VMMR0DECL(void) SVMR0GlobalTerm(void)
452{
453 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
454 {
455 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
456 g_pvIOBitmap = NULL;
457 g_HCPhysIOBitmap = 0;
458 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
459 }
460}
461
462
463/**
464 * Frees any allocated per-VCPU structures for a VM.
465 *
466 * @param pVM Pointer to the VM.
467 */
468DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
469{
470 for (uint32_t i = 0; i < pVM->cCpus; i++)
471 {
472 PVMCPU pVCpu = &pVM->aCpus[i];
473 AssertPtr(pVCpu);
474
475 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
476 {
477 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
478 pVCpu->hm.s.svm.pvVmcbHost = 0;
479 pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
480 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
481 }
482
483 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
484 {
485 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
486 pVCpu->hm.s.svm.pvVmcb = 0;
487 pVCpu->hm.s.svm.HCPhysVmcb = 0;
488 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
489 }
490
491 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
492 {
493 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
494 pVCpu->hm.s.svm.pvMsrBitmap = 0;
495 pVCpu->hm.s.svm.HCPhysMsrBitmap = 0;
496 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
497 }
498 }
499}
500
501
502/**
503 * Does per-VM AMD-V initialization.
504 *
505 * @returns VBox status code.
506 * @param pVM Pointer to the VM.
507 */
508VMMR0DECL(int) SVMR0InitVM(PVM pVM)
509{
510 int rc = VERR_INTERNAL_ERROR_5;
511
512 /*
513 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
514 */
515 uint32_t u32Family;
516 uint32_t u32Model;
517 uint32_t u32Stepping;
518 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
519 {
520 Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
521 pVM->hm.s.svm.fAlwaysFlushTLB = true;
522 }
523
524 /*
525 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
526 */
527 for (VMCPUID i = 0; i < pVM->cCpus; i++)
528 {
529 PVMCPU pVCpu = &pVM->aCpus[i];
530 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
531 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
532 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
533 }
534
535 for (VMCPUID i = 0; i < pVM->cCpus; i++)
536 {
537 PVMCPU pVCpu = &pVM->aCpus[i];
538
539 /*
540 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
541 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
542 */
543 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
544 if (RT_FAILURE(rc))
545 goto failure_cleanup;
546
547 pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
548 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
549 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
550 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
551
552 /*
553 * Allocate one page for the guest-state VMCB.
554 */
555 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
556 if (RT_FAILURE(rc))
557 goto failure_cleanup;
558
559 pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
560 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
561 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
562 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
563
564 /*
565 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
566 * SVM to not require one.
567 */
568 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
569 if (RT_FAILURE(rc))
570 goto failure_cleanup;
571
572 pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
573 pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
574 /* Set all bits to intercept all MSR accesses (changed later on). */
575 ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, UINT32_C(0xffffffff));
576 }
577
578 return VINF_SUCCESS;
579
580failure_cleanup:
581 hmR0SvmFreeStructs(pVM);
582 return rc;
583}
584
585
586/**
587 * Does per-VM AMD-V termination.
588 *
589 * @returns VBox status code.
590 * @param pVM Pointer to the VM.
591 */
592VMMR0DECL(int) SVMR0TermVM(PVM pVM)
593{
594 hmR0SvmFreeStructs(pVM);
595 return VINF_SUCCESS;
596}
597
598
599/**
600 * Sets the permission bits for the specified MSR in the MSRPM.
601 *
602 * @param pVCpu Pointer to the VMCPU.
603 * @param uMsr The MSR for which the access permissions are being set.
604 * @param enmRead MSR read permissions.
605 * @param enmWrite MSR write permissions.
606 */
607static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
608{
609 unsigned ulBit;
610 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
611
612 /*
613 * Layout:
614 * Byte offset MSR range
615 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
616 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
617 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
618 * 0x1800 - 0x1fff Reserved
619 */
620 if (uMsr <= 0x00001FFF)
621 {
622 /* Pentium-compatible MSRs. */
623 ulBit = uMsr * 2;
624 }
625 else if ( uMsr >= 0xC0000000
626 && uMsr <= 0xC0001FFF)
627 {
628 /* AMD Sixth Generation x86 Processor MSRs. */
629 ulBit = (uMsr - 0xC0000000) * 2;
630 pbMsrBitmap += 0x800;
631 }
632 else if ( uMsr >= 0xC0010000
633 && uMsr <= 0xC0011FFF)
634 {
635 /* AMD Seventh and Eighth Generation Processor MSRs. */
636 ulBit = (uMsr - 0xC0010000) * 2;
637 pbMsrBitmap += 0x1000;
638 }
639 else
640 {
641 AssertFailed();
642 return;
643 }
644
645 Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
646 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
647 ASMBitSet(pbMsrBitmap, ulBit);
648 else
649 ASMBitClear(pbMsrBitmap, ulBit);
650
651 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
652 ASMBitSet(pbMsrBitmap, ulBit + 1);
653 else
654 ASMBitClear(pbMsrBitmap, ulBit + 1);
655
656 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
657 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
658}
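
/*
 * Worked example (informal): for MSR_K8_LSTAR (0xC0000082) the second range
 * applies, so ulBit = (0xC0000082 - 0xC0000000) * 2 = 0x104 and the bitmap
 * pointer is advanced by 0x800. That is absolute byte offset 0x820, where
 * bit 4 is the read intercept and bit 5 the write intercept. A hypothetical
 * call that stops intercepting LSTAR reads but keeps intercepting writes:
 */
#if 0
    hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
#endif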
659
660
661/**
662 * Sets up AMD-V for the specified VM.
663 * This function is only called once per-VM during initialization.
664 *
665 * @returns VBox status code.
666 * @param pVM Pointer to the VM.
667 */
668VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
669{
670 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
671 AssertReturn(pVM, VERR_INVALID_PARAMETER);
672 Assert(pVM->hm.s.svm.fSupported);
673
674 for (VMCPUID i = 0; i < pVM->cCpus; i++)
675 {
676 PVMCPU pVCpu = &pVM->aCpus[i];
677 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
678
679 AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
680
681 /* Initialize the #VMEXIT history array with end-of-array markers (UINT16_MAX). */
682 Assert(!pVCpu->hm.s.idxExitHistoryFree);
683 HMCPU_EXIT_HISTORY_RESET(pVCpu);
684
685 /* Trap exceptions unconditionally (debug purposes). */
686#ifdef HMSVM_ALWAYS_TRAP_PF
687 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
688#endif
689#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
690 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
691 pVmcb->ctrl.u32InterceptException |= 0
692 | RT_BIT(X86_XCPT_BP)
693 | RT_BIT(X86_XCPT_DB)
694 | RT_BIT(X86_XCPT_DE)
695 | RT_BIT(X86_XCPT_NM)
696 | RT_BIT(X86_XCPT_UD)
697 | RT_BIT(X86_XCPT_NP)
698 | RT_BIT(X86_XCPT_SS)
699 | RT_BIT(X86_XCPT_GP)
700 | RT_BIT(X86_XCPT_PF)
701 | RT_BIT(X86_XCPT_MF)
702 ;
703#endif
704
705 /* Set up unconditional intercepts and conditions. */
706 pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR /* External interrupt causes a #VMEXIT. */
707 | SVM_CTRL1_INTERCEPT_NMI /* Non-maskable interrupts cause a #VMEXIT. */
708 | SVM_CTRL1_INTERCEPT_INIT /* INIT signal causes a #VMEXIT. */
709 | SVM_CTRL1_INTERCEPT_RDPMC /* RDPMC causes a #VMEXIT. */
710 | SVM_CTRL1_INTERCEPT_CPUID /* CPUID causes a #VMEXIT. */
711 | SVM_CTRL1_INTERCEPT_RSM /* RSM causes a #VMEXIT. */
712 | SVM_CTRL1_INTERCEPT_HLT /* HLT causes a #VMEXIT. */
713 | SVM_CTRL1_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO #VMEXITs. */
714 | SVM_CTRL1_INTERCEPT_MSR_SHADOW /* MSR access not covered by MSRPM causes a #VMEXIT.*/
715 | SVM_CTRL1_INTERCEPT_INVLPGA /* INVLPGA causes a #VMEXIT. */
716 | SVM_CTRL1_INTERCEPT_SHUTDOWN /* Shutdown events cause a #VMEXIT. */
717 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Intercept "freezing" during legacy FPU handling. */
718
719 pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* VMRUN causes a #VMEXIT. */
720 | SVM_CTRL2_INTERCEPT_VMMCALL /* VMMCALL causes a #VMEXIT. */
721 | SVM_CTRL2_INTERCEPT_VMLOAD /* VMLOAD causes a #VMEXIT. */
722 | SVM_CTRL2_INTERCEPT_VMSAVE /* VMSAVE causes a #VMEXIT. */
723 | SVM_CTRL2_INTERCEPT_STGI /* STGI causes a #VMEXIT. */
724 | SVM_CTRL2_INTERCEPT_CLGI /* CLGI causes a #VMEXIT. */
725 | SVM_CTRL2_INTERCEPT_SKINIT /* SKINIT causes a #VMEXIT. */
726 | SVM_CTRL2_INTERCEPT_WBINVD /* WBINVD causes a #VMEXIT. */
727 | SVM_CTRL2_INTERCEPT_MONITOR /* MONITOR causes a #VMEXIT. */
728 | SVM_CTRL2_INTERCEPT_MWAIT /* MWAIT causes a #VMEXIT. */
729 | SVM_CTRL2_INTERCEPT_XSETBV; /* XSETBV causes a #VMEXIT. */
730
731 /* CR0, CR4 reads must be intercepted; our shadow values are not necessarily the same as the guest's. */
732 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
733
734 /* CR0, CR4 writes must be intercepted for the same reasons as above. */
735 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
736
737 /* Intercept all DRx reads and writes by default. Changed later on. */
738 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
739 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
740
741 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
742 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
743
744 /* Ignore the priority in the TPR. This is necessary for delivering PIC style (ExtInt) interrupts, as we currently
745 deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent(). */
746 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;
747
748 /* Set the IO and MSR permission bitmap physical addresses. */
749 pVmcb->ctrl.u64IOPMPhysAddr = g_HCPhysIOBitmap;
750 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
751
752 /* No LBR virtualization. */
753 pVmcb->ctrl.u64LBRVirt = 0;
754
755 /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from the VMCB in memory. */
756 pVmcb->ctrl.u64VmcbCleanBits = 0;
757
758 /* The host ASID must be zero (MBZ); for the guest, start with 1. */
759 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
760
761 /*
762 * Setup the PAT MSR (applicable for Nested Paging only).
763 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
764 * so choose type 6 for all PAT slots.
765 */
766 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
767
768 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
769 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
770
771 /* Without Nested Paging, we need additional intercepts. */
772 if (!pVM->hm.s.fNestedPaging)
773 {
774 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
775 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
776 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
777
778 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
779 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
780 | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
781
782 /* Page faults must be intercepted to implement shadow paging. */
783 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
784 }
785
786#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
787 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
788#endif
789
790 /* Apply the exception intercepts needed by the GIM provider. */
791 if (pVCpu->hm.s.fGIMTrapXcptUD)
792 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD);
793
794 /*
795 * The following MSRs are saved/restored automatically during the world-switch.
796 * Don't intercept guest read/write accesses to these MSRs.
797 */
798 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
799 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
800 hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
801 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
802 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
803 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
804 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
805 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
806 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
807 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
808 }
809
810 return VINF_SUCCESS;
811}
812
813
814/**
815 * Invalidates a guest page by guest virtual address.
816 *
817 * @returns VBox status code.
818 * @param pVM Pointer to the VM.
819 * @param pVCpu Pointer to the VMCPU.
820 * @param GCVirt Guest virtual address of the page to invalidate.
821 */
822VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
823{
824 AssertReturn(pVM, VERR_INVALID_PARAMETER);
825 Assert(pVM->hm.s.svm.fSupported);
826
827 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
828
829 /* Skip it if a TLB flush is already pending. */
830 if (!fFlushPending)
831 {
832 Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
833
834 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
835 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
836
837#if HC_ARCH_BITS == 32
838 /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
839 if (CPUMIsGuestInLongMode(pVCpu))
840 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
841 else
842#endif
843 {
844 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
845 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
846 }
847 }
848 return VINF_SUCCESS;
849}
850
851
852/**
853 * Flushes the appropriate tagged-TLB entries.
854 *
855 * @param pVCpu Pointer to the VMCPU.
857 */
858static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
859{
860 PVM pVM = pVCpu->CTX_SUFF(pVM);
861 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
862 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
863
864 /*
865 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
866 * This can happen both for start & resume due to long jumps back to ring-3.
867 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
868 * so we cannot reuse the ASIDs without flushing.
869 */
870 bool fNewAsid = false;
871 Assert(pCpu->idCpu != NIL_RTCPUID);
872 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
873 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
874 {
875 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
876 pVCpu->hm.s.fForceTLBFlush = true;
877 fNewAsid = true;
878 }
879
880 /* Set TLB flush state as checked until we return from the world switch. */
881 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
882
883 /* Check for explicit TLB shootdowns. */
884 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
885 {
886 pVCpu->hm.s.fForceTLBFlush = true;
887 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
888 }
889
890 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
891
892 if (pVM->hm.s.svm.fAlwaysFlushTLB)
893 {
894 /*
895 * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
896 */
897 pCpu->uCurrentAsid = 1;
898 pVCpu->hm.s.uCurrentAsid = 1;
899 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
900 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
901
902 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
903 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
904 }
905 else if (pVCpu->hm.s.fForceTLBFlush)
906 {
907 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
908 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
909
910 if (fNewAsid)
911 {
912 ++pCpu->uCurrentAsid;
913 bool fHitASIDLimit = false;
914 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
915 {
916 pCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
917 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
918 fHitASIDLimit = true;
919
920 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
921 {
922 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
923 pCpu->fFlushAsidBeforeUse = true;
924 }
925 else
926 {
927 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
928 pCpu->fFlushAsidBeforeUse = false;
929 }
930 }
931
932 if ( !fHitASIDLimit
933 && pCpu->fFlushAsidBeforeUse)
934 {
935 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
936 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
937 else
938 {
939 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
940 pCpu->fFlushAsidBeforeUse = false;
941 }
942 }
943
944 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
945 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
946 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
947 }
948 else
949 {
950 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
951 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
952 else
953 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
954 }
955
956 pVCpu->hm.s.fForceTLBFlush = false;
957 }
958 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
959 * not be executed. See hmQueueInvlPage() where it is commented
960 * out. Support individual entry flushing someday. */
961#if 0
962 else
963 {
964 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
965 {
966 /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
967 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
968 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
969 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
970
971 pVCpu->hm.s.TlbShootdown.cPages = 0;
972 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
973 }
974 }
975#endif
976
977
978 /* Update VMCB with the ASID. */
979 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
980 {
981 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
982 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
983 }
984
985 AssertMsg(pVCpu->hm.s.idLastCpu == pCpu->idCpu,
986 ("vcpu idLastCpu=%x pcpu idCpu=%x\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
987 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
988 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
989 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
990 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
991 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
992 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
993
994#ifdef VBOX_WITH_STATISTICS
995 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
996 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
997 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
998 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
999 {
1000 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1001 }
1002 else
1003 {
1004 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1005 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1006 }
1007#endif
1008}
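
/*
 * Worked example (informal, hypothetical numbers): with uMaxAsid = 8, ASIDs 1
 * through 7 are handed out in turn as VCPUs need fresh ASIDs; the next
 * increment hits the limit, wraps uCurrentAsid back to 1, bumps cTlbFlushes so
 * every VCPU on this host CPU re-tags, and requests either a flush-by-ASID or
 * a full TLB flush depending on AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID.
 */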
1009
1010
1011/** @name 64-bit guest on 32-bit host OS helper functions.
1012 *
1013 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
1014 * mode (code segment, paging). These wrappers/helpers perform the necessary
1015 * bits for the 32->64 switcher.
1016 *
1017 * @{ */
1018#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1019/**
1020 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
1021 *
1022 * @returns VBox status code.
1023 * @param HCPhysVmcbHost Physical address of host VMCB.
1024 * @param HCPhysVmcb Physical address of the VMCB.
1025 * @param pCtx Pointer to the guest-CPU context.
1026 * @param pVM Pointer to the VM.
1027 * @param pVCpu Pointer to the VMCPU.
1028 */
1029DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
1030{
1031 uint32_t aParam[8];
1032 aParam[0] = (uint32_t)(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
1033 aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32); /* Param 1: HCPhysVmcbHost - Hi. */
1034 aParam[2] = (uint32_t)(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
1035 aParam[3] = (uint32_t)(HCPhysVmcb >> 32); /* Param 2: HCPhysVmcb - Hi. */
1036 aParam[4] = VM_RC_ADDR(pVM, pVM);
1037 aParam[5] = 0;
1038 aParam[6] = VM_RC_ADDR(pVM, pVCpu);
1039 aParam[7] = 0;
1040
1041 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]);
1042}
1043
1044
1045/**
1046 * Executes the specified VMRUN handler in 64-bit mode.
1047 *
1048 * @returns VBox status code.
1049 * @param pVM Pointer to the VM.
1050 * @param pVCpu Pointer to the VMCPU.
1051 * @param pCtx Pointer to the guest-CPU context.
1052 * @param enmOp The operation to perform.
1053 * @param cParams Number of parameters.
1054 * @param paParam Array of 32-bit parameters.
1055 */
1056VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
1057 uint32_t cParams, uint32_t *paParam)
1058{
1059 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
1060 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
1061
1062 /* Disable interrupts. */
1063 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
1064
1065#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1066 RTCPUID idHostCpu = RTMpCpuId();
1067 CPUMR0SetLApic(pVCpu, idHostCpu);
1068#endif
1069
1070 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
1071 CPUMSetHyperEIP(pVCpu, enmOp);
1072 for (int i = (int)cParams - 1; i >= 0; i--)
1073 CPUMPushHyper(pVCpu, paParam[i]);
1074
1075 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
1076 /* Call the switcher. */
1077 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
1078 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
1079
1080 /* Restore interrupts. */
1081 ASMSetFlags(uOldEFlags);
1082 return rc;
1083}
1084
1085#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
1086/** @} */
1087
1088
1089/**
1090 * Adds an exception to the intercept exception bitmap in the VMCB and updates
1091 * the corresponding VMCB Clean bit.
1092 *
1093 * @param pVmcb Pointer to the VM control block.
1094 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1095 */
1096DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1097{
1098 if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
1099 {
1100 pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
1101 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1102 }
1103}
1104
1105
1106/**
1107 * Removes an exception from the intercept-exception bitmap in the VMCB and
1108 * updates the corresponding VMCB Clean bit.
1109 *
1110 * @param pVmcb Pointer to the VM control block.
1111 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1112 */
1113DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1114{
1115#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1116 if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
1117 {
1118 pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
1119 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1120 }
1121#endif
1122}
1123
1124
1125/**
1126 * Loads the guest CR0 control register into the guest-state area in the VMCB.
1127 * Although the guest CR0 is a separate field in the VMCB we have to consider
1128 * the FPU state itself which is shared between the host and the guest.
1129 *
1130 * @returns VBox status code.
1131 * @param pVCpu Pointer to the VMCPU.
1132 * @param pVmcb Pointer to the VM control block.
1133 * @param pCtx Pointer to the guest-CPU context.
1134 *
1135 * @remarks No-long-jump zone!!!
1136 */
1137static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1138{
1139 /*
1140 * Guest CR0.
1141 */
1142 PVM pVM = pVCpu->CTX_SUFF(pVM);
1143 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
1144 {
1145 uint64_t u64GuestCR0 = pCtx->cr0;
1146
1147 /* Always enable caching. */
1148 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
1149
1150 /*
1151 * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
1152 */
1153 if (!pVM->hm.s.fNestedPaging)
1154 {
1155 u64GuestCR0 |= X86_CR0_PG; /* When Nested Paging is not available, use shadow page tables. */
1156 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1157 }
1158
1159 /*
1160 * Guest FPU bits.
1161 */
1162 bool fInterceptNM = false;
1163 bool fInterceptMF = false;
1164 u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exception handling rather than external interrupts. */
1165 if (CPUMIsGuestFPUStateActive(pVCpu))
1166 {
1167 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1168 if (!(pCtx->cr0 & X86_CR0_NE))
1169 {
1170 Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
1171 fInterceptMF = true;
1172 }
1173 }
1174 else
1175 {
1176 fInterceptNM = true; /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
1177 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
1178 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
1179 }
1180
1181 /*
1182 * Update the exception intercept bitmap.
1183 */
1184 if (fInterceptNM)
1185 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
1186 else
1187 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
1188
1189 if (fInterceptMF)
1190 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
1191 else
1192 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
1193
1194 pVmcb->guest.u64CR0 = u64GuestCR0;
1195 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1196 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
1197 }
1198}
1199
1200
1201/**
1202 * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
1203 *
1204 * @returns VBox status code.
1205 * @param pVCpu Pointer to the VMCPU.
1206 * @param pVmcb Pointer to the VM control block.
1207 * @param pCtx Pointer to the guest-CPU context.
1208 *
1209 * @remarks No-long-jump zone!!!
1210 */
1211static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1212{
1213 PVM pVM = pVCpu->CTX_SUFF(pVM);
1214
1215 /*
1216 * Guest CR2.
1217 */
1218 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
1219 {
1220 pVmcb->guest.u64CR2 = pCtx->cr2;
1221 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1222 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
1223 }
1224
1225 /*
1226 * Guest CR3.
1227 */
1228 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
1229 {
1230 if (pVM->hm.s.fNestedPaging)
1231 {
1232 PGMMODE enmShwPagingMode;
1233#if HC_ARCH_BITS == 32
1234 if (CPUMIsGuestInLongModeEx(pCtx))
1235 enmShwPagingMode = PGMMODE_AMD64_NX;
1236 else
1237#endif
1238 enmShwPagingMode = PGMGetHostMode(pVM);
1239
1240 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
1241 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1242 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1243 pVmcb->guest.u64CR3 = pCtx->cr3;
1244 }
1245 else
1246 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1247
1248 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1249 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
1250 }
1251
1252 /*
1253 * Guest CR4.
1254 * ASSUMES this is done every time we get in from ring-3! (XCR0)
1255 */
1256 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
1257 {
1258 uint64_t u64GuestCR4 = pCtx->cr4;
1259 if (!pVM->hm.s.fNestedPaging)
1260 {
1261 switch (pVCpu->hm.s.enmShadowMode)
1262 {
1263 case PGMMODE_REAL:
1264 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1265 AssertFailed();
1266 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1267
1268 case PGMMODE_32_BIT: /* 32-bit paging. */
1269 u64GuestCR4 &= ~X86_CR4_PAE;
1270 break;
1271
1272 case PGMMODE_PAE: /* PAE paging. */
1273 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1274 /** Must use PAE paging as we could use physical memory > 4 GB */
1275 u64GuestCR4 |= X86_CR4_PAE;
1276 break;
1277
1278 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1279 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1280#ifdef VBOX_ENABLE_64_BITS_GUESTS
1281 break;
1282#else
1283 AssertFailed();
1284 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1285#endif
1286
1287 default: /* shut up gcc */
1288 AssertFailed();
1289 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1290 }
1291 }
1292
1293 pVmcb->guest.u64CR4 = u64GuestCR4;
1294 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1295
1296 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1297 pVCpu->hm.s.fLoadSaveGuestXcr0 = (u64GuestCR4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1298
1299 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
1300 }
1301
1302 return VINF_SUCCESS;
1303}
1304
1305
1306/**
1307 * Loads the guest segment registers into the VMCB.
1308 *
1309 * @returns VBox status code.
1310 * @param pVCpu Pointer to the VMCPU.
1311 * @param pVmcb Pointer to the VM control block.
1312 * @param pCtx Pointer to the guest-CPU context.
1313 *
1314 * @remarks No-long-jump zone!!!
1315 */
1316static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1317{
1318 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
1319 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
1320 {
1321 HMSVM_LOAD_SEG_REG(CS, cs);
1322 HMSVM_LOAD_SEG_REG(SS, ss);
1323 HMSVM_LOAD_SEG_REG(DS, ds);
1324 HMSVM_LOAD_SEG_REG(ES, es);
1325 HMSVM_LOAD_SEG_REG(FS, fs);
1326 HMSVM_LOAD_SEG_REG(GS, gs);
1327
1328 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1329 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1330 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
1331 }
1332
1333 /* Guest TR. */
1334 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
1335 {
1336 HMSVM_LOAD_SEG_REG(TR, tr);
1337 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
1338 }
1339
1340 /* Guest LDTR. */
1341 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
1342 {
1343 HMSVM_LOAD_SEG_REG(LDTR, ldtr);
1344 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
1345 }
1346
1347 /* Guest GDTR. */
1348 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
1349 {
1350 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1351 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1352 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1353 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
1354 }
1355
1356 /* Guest IDTR. */
1357 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
1358 {
1359 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1360 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1361 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1362 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
1363 }
1364}
1365
1366
1367/**
1368 * Loads the guest MSRs into the VMCB.
1369 *
1370 * @param pVCpu Pointer to the VMCPU.
1371 * @param pVmcb Pointer to the VM control block.
1372 * @param pCtx Pointer to the guest-CPU context.
1373 *
1374 * @remarks No-long-jump zone!!!
1375 */
1376static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1377{
1378 /* Guest Sysenter MSRs. */
1379 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1380 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1381 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1382
1383 /*
1384 * Guest EFER MSR.
1385 * AMD-V requires guest EFER.SVME to be set. Weird.
1386 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1387 */
1388 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
1389 {
1390 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1391 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1392 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
1393 }
1394
1395 /* 64-bit MSRs. */
1396 if (CPUMIsGuestInLongModeEx(pCtx))
1397 {
1398 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1399 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1400 }
1401 else
1402 {
1403 /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit from the guest EFER, otherwise AMD-V expects AMD64 shadow paging. */
1404 if (pCtx->msrEFER & MSR_K6_EFER_LME)
1405 {
1406 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1407 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1408 }
1409 }
1410
1411
1412 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
1413 * be writable in 32-bit mode. Clarify with AMD spec. */
1414 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1415 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1416 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1417 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1418 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1419}
1420
1421
1422/**
1423 * Loads the shared debug state (DR6, DR7) into the VMCB and programs the
1424 * necessary \#DB and MOV DRx intercepts accordingly.
1425 *
1426 * @param pVCpu Pointer to the VMCPU.
1427 * @param pVmcb Pointer to the VM control block.
1428 * @param pCtx Pointer to the guest-CPU context.
1429 *
1430 * @remarks No-long-jump zone!!!
1431 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1432 */
1433static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1434{
1435 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
1436 return;
1437 Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
1438 Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
1439
1440 bool fInterceptDB = false;
1441 bool fInterceptMovDRx = false;
1442
1443 /*
1444 * Anyone single stepping on the host side? If so, we'll have to use the
1445 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1446 * the VMM level like the VT-x implementation does.
1447 */
1448 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1449 if (fStepping)
1450 {
1451 pVCpu->hm.s.fClearTrapFlag = true;
1452 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1453 fInterceptDB = true;
1454 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1455 }
1456
1457 if ( fStepping
1458 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1459 {
1460 /*
1461 * Use the combined guest and host DRx values found in the hypervisor
1462 * register set because the debugger has breakpoints active or someone
1463 * is single stepping on the host side.
1464 *
1465 * Note! DBGF expects a clean DR6 state before executing guest code.
1466 */
1467#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1468 if ( CPUMIsGuestInLongModeEx(pCtx)
1469 && !CPUMIsHyperDebugStateActivePending(pVCpu))
1470 {
1471 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1472 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
1473 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
1474 }
1475 else
1476#endif
1477 if (!CPUMIsHyperDebugStateActive(pVCpu))
1478 {
1479 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1480 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1481 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1482 }
1483
1484 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1485 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1486 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1487 {
1488 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1489 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1490 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1491 pVCpu->hm.s.fUsingHyperDR7 = true;
1492 }
1493
1494 /** @todo If we cared, we could optimize to allow the guest to read registers
1495 * with the same values. */
1496 fInterceptDB = true;
1497 fInterceptMovDRx = true;
1498 Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
1499 }
1500 else
1501 {
1502 /*
1503 * Update DR6, DR7 with the guest values if necessary.
1504 */
1505 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1506 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1507 {
1508 pVmcb->guest.u64DR7 = pCtx->dr[7];
1509 pVmcb->guest.u64DR6 = pCtx->dr[6];
1510 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1511 pVCpu->hm.s.fUsingHyperDR7 = false;
1512 }
1513
1514 /*
1515 * If the guest has enabled debug registers, we need to load them prior to
1516 * executing guest code so they'll trigger at the right time.
1517 */
1518 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1519 {
1520#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1521 if ( CPUMIsGuestInLongModeEx(pCtx)
1522 && !CPUMIsGuestDebugStateActivePending(pVCpu))
1523 {
1524 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1525 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1526 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
1527 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
1528 }
1529 else
1530#endif
1531 if (!CPUMIsGuestDebugStateActive(pVCpu))
1532 {
1533 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1534 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1535 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1536 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1537 }
1538 Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
1539 }
1540 /*
1541 * If no debugging is enabled, we'll lazily load DR0-3. We don't need to
1542 * intercept #DB as DR6 is updated in the VMCB.
1543 */
1544#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1545 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
1546 && !CPUMIsGuestDebugStateActive(pVCpu))
1547#else
1548 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1549#endif
1550 {
1551 fInterceptMovDRx = true;
1552 }
1553 }
1554
1555 /*
1556 * Set up the intercepts.
1557 */
1558 if (fInterceptDB)
1559 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
1560 else
1561 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
1562
1563 if (fInterceptMovDRx)
1564 {
1565 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1566 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1567 {
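            /* 0xffff sets all 16 bits, i.e. intercept reads and writes of every debug register (DR0-DR15). */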
1568 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
1569 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
1570 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1571 }
1572 }
1573 else
1574 {
1575 if ( pVmcb->ctrl.u16InterceptRdDRx
1576 || pVmcb->ctrl.u16InterceptWrDRx)
1577 {
1578 pVmcb->ctrl.u16InterceptRdDRx = 0;
1579 pVmcb->ctrl.u16InterceptWrDRx = 0;
1580 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1581 }
1582 }
1583
1584 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
1585}
1586
1587
1588/**
1589 * Loads the guest APIC state (currently just the TPR).
1590 *
1591 * @returns VBox status code.
1592 * @param pVCpu Pointer to the VMCPU.
1593 * @param pVmcb Pointer to the VM control block.
1594 * @param pCtx Pointer to the guest-CPU context.
1595 */
1596static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1597{
1598 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
1599 return VINF_SUCCESS;
1600
1601 bool fPendingIntr;
1602 uint8_t u8Tpr;
1603 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
1604 AssertRCReturn(rc, rc);
1605
1606 /* Assume that we need to trap all TPR accesses and thus need not check on
1607 every #VMEXIT if we should update the TPR. */
1608 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
1609 pVCpu->hm.s.svm.fSyncVTpr = false;
1610
1611    /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
1612 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
1613 {
1614 pCtx->msrLSTAR = u8Tpr;
1615
1616 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
1617 if (fPendingIntr)
1618 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
1619 else
1620 {
1621 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1622 pVCpu->hm.s.svm.fSyncVTpr = true;
1623 }
1624 }
1625 else
1626 {
1627 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
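        /* E.g. a guest TPR of 0x80 (task-priority class 8) is stored as a VTPR value of 8. */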
1628 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
1629
1630 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
1631 if (fPendingIntr)
1632 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
1633 else
1634 {
1635 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
1636 pVCpu->hm.s.svm.fSyncVTpr = true;
1637 }
1638
1639 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
1640 }
1641
1642 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
1643 return rc;
1644}
1645
1646
1647/**
1648 * Loads the exception interrupts required for guest execution in the VMCB.
1649 *
1650 * @returns VBox status code.
1651 * @param pVCpu Pointer to the VMCPU.
1652 * @param pVmcb Pointer to the VM control block.
1653 * @param pCtx Pointer to the guest-CPU context.
1654 */
1655static int hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1656{
1657 int rc = VINF_SUCCESS;
1658 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
1659 {
1660 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmLoadSharedCR0(). */
1661 if (pVCpu->hm.s.fGIMTrapXcptUD)
1662 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_UD);
1663 else
1664 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_UD);
1665 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
1666 }
1667 return rc;
1668}
1669
1670
1671/**
1672 * Sets up the appropriate function to run guest code.
1673 *
1674 * @returns VBox status code.
1675 * @param pVCpu Pointer to the VMCPU.
1676 * @param pCtx Pointer to the guest-CPU context.
1677 *
1678 * @remarks No-long-jump zone!!!
1679 */
1680static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
1681{
1682 if (CPUMIsGuestInLongModeEx(pCtx))
1683 {
1684#ifndef VBOX_ENABLE_64_BITS_GUESTS
1685 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1686#endif
1687 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
1688#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1689 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
1690 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
1691#else
1692 /* 64-bit host or hybrid host. */
1693 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
1694#endif
1695 }
1696 else
1697 {
1698 /* Guest is not in long mode, use the 32-bit handler. */
1699 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
1700 }
1701 return VINF_SUCCESS;
1702}
1703
1704
1705/**
1706 * Enters the AMD-V session.
1707 *
1708 * @returns VBox status code.
1709 * @param pVM Pointer to the VM.
1710 * @param pVCpu Pointer to the VMCPU.
1711 * @param pCpu Pointer to the CPU info struct.
1712 */
1713VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1714{
1715 AssertPtr(pVM);
1716 AssertPtr(pVCpu);
1717 Assert(pVM->hm.s.svm.fSupported);
1718 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1719 NOREF(pVM); NOREF(pCpu);
1720
1721 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1722 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1723
1724 pVCpu->hm.s.fLeaveDone = false;
1725 return VINF_SUCCESS;
1726}
1727
1728
1729/**
1730 * Thread-context callback for AMD-V.
1731 *
1732 * @param enmEvent The thread-context event.
1733 * @param pVCpu Pointer to the VMCPU.
1734 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
1735 * @thread EMT(pVCpu)
1736 */
1737VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
1738{
1739 NOREF(fGlobalInit);
1740
1741 switch (enmEvent)
1742 {
1743 case RTTHREADCTXEVENT_OUT:
1744 {
1745 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1746 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
1747 VMCPU_ASSERT_EMT(pVCpu);
1748
1749 PVM pVM = pVCpu->CTX_SUFF(pVM);
1750 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1751
1752 /* No longjmps (log-flush, locks) in this fragile context. */
1753 VMMRZCallRing3Disable(pVCpu);
1754
1755 if (!pVCpu->hm.s.fLeaveDone)
1756 {
1757 hmR0SvmLeave(pVM, pVCpu, pCtx);
1758 pVCpu->hm.s.fLeaveDone = true;
1759 }
1760
1761 /* Leave HM context, takes care of local init (term). */
1762 int rc = HMR0LeaveCpu(pVCpu);
1763 AssertRC(rc); NOREF(rc);
1764
1765 /* Restore longjmp state. */
1766 VMMRZCallRing3Enable(pVCpu);
1767 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
1768 break;
1769 }
1770
1771 case RTTHREADCTXEVENT_IN:
1772 {
1773 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1774 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
1775 VMCPU_ASSERT_EMT(pVCpu);
1776
1777 /* No longjmps (log-flush, locks) in this fragile context. */
1778 VMMRZCallRing3Disable(pVCpu);
1779
1780 /*
1781 * Initialize the bare minimum state required for HM. This takes care of
1782 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
1783 */
1784 int rc = HMR0EnterCpu(pVCpu);
1785 AssertRC(rc); NOREF(rc);
1786 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1787
1788 pVCpu->hm.s.fLeaveDone = false;
1789
1790 /* Restore longjmp state. */
1791 VMMRZCallRing3Enable(pVCpu);
1792 break;
1793 }
1794
1795 default:
1796 break;
1797 }
1798}
1799
1800
1801/**
1802 * Saves the host state.
1803 *
1804 * @returns VBox status code.
1805 * @param pVM Pointer to the VM.
1806 * @param pVCpu Pointer to the VMCPU.
1807 *
1808 * @remarks No-long-jump zone!!!
1809 */
1810VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
1811{
1812 NOREF(pVM);
1813 NOREF(pVCpu);
1814 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
1815 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
1816 return VINF_SUCCESS;
1817}
1818
1819
1820/**
1821 * Loads the guest state into the VMCB.
1822 *
1823 * The CPU state will be loaded from these fields on every successful VM-entry.
1824 * Also sets up the appropriate VMRUN function to execute guest code based on
1825 * the guest CPU mode.
1826 *
1827 * @returns VBox status code.
1828 * @param pVM Pointer to the VM.
1829 * @param pVCpu Pointer to the VMCPU.
1830 * @param pCtx Pointer to the guest-CPU context.
1831 *
1832 * @remarks No-long-jump zone!!!
1833 */
1834static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1835{
1836 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1837 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
1838
1839 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
1840
1841 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
1842 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1843
1844 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
1845 hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
1846
1847 pVmcb->guest.u64RIP = pCtx->rip;
1848 pVmcb->guest.u64RSP = pCtx->rsp;
1849 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
1850 pVmcb->guest.u64RAX = pCtx->rax;
1851
1852 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
1853 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1854
1855 rc = hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
1856 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1857
1858 rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
1859 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1860
1861 /* Clear any unused and reserved bits. */
1862 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */
1863 | HM_CHANGED_GUEST_RSP
1864 | HM_CHANGED_GUEST_RFLAGS
1865 | HM_CHANGED_GUEST_SYSENTER_CS_MSR
1866 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
1867 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
1868 | HM_CHANGED_GUEST_LAZY_MSRS /* Unused. */
1869 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */
1870 | HM_CHANGED_SVM_RESERVED2
1871 | HM_CHANGED_SVM_RESERVED3
1872 | HM_CHANGED_SVM_RESERVED4);
1873
1874 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
1875 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
1876 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
1877 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
1878
1879 Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
1880 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
1881 return rc;
1882}
1883
1884
1885/**
1886 * Loads the state shared between the host and guest into the
1887 * VMCB.
1888 *
1889 * @param pVCpu Pointer to the VMCPU.
1890 * @param pVmcb Pointer to the VM control block.
1891 * @param pCtx Pointer to the guest-CPU context.
1892 *
1893 * @remarks No-long-jump zone!!!
1894 */
1895static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1896{
1897 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1898 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1899
1900 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
1901 hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
1902
1903 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
1904 hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
1905
1906 /* Unused on AMD-V. */
1907 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
1908
1909 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
1910 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
1911}
1912
1913
1914/**
1915 * Saves the entire guest state from the VMCB into the
1916 * guest-CPU context. Currently there is no residual state left in the CPU that
1917 * is not updated in the VMCB.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu Pointer to the VMCPU.
1921 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1922 * out-of-sync. Make sure to update the required fields
1923 * before using them.
1924 */
1925static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1926{
1927 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1928
1929 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1930
1931 pMixedCtx->rip = pVmcb->guest.u64RIP;
1932 pMixedCtx->rsp = pVmcb->guest.u64RSP;
1933 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
1934 pMixedCtx->rax = pVmcb->guest.u64RAX;
1935
1936 /*
1937 * Guest interrupt shadow.
1938 */
1939 if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1940 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
1941 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1942 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1943
1944 /*
1945 * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
1946 */
1947 pMixedCtx->cr2 = pVmcb->guest.u64CR2;
1948
1949 /*
1950 * Guest MSRs.
1951 */
1952 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */
1953 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */
1954 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */
1955 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */
1956 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */
1957 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
1958 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
1959 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
1960
1961 /*
1962 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
1963 */
1964 HMSVM_SAVE_SEG_REG(CS, cs);
1965 HMSVM_SAVE_SEG_REG(SS, ss);
1966 HMSVM_SAVE_SEG_REG(DS, ds);
1967 HMSVM_SAVE_SEG_REG(ES, es);
1968 HMSVM_SAVE_SEG_REG(FS, fs);
1969 HMSVM_SAVE_SEG_REG(GS, gs);
1970
1971 /*
1972 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
1973 * register (yet).
1974 */
1975 /** @todo SELM might need to be fixed as it too should not care about the
1976 * granularity bit. See @bugref{6785}. */
1977 if ( !pMixedCtx->cs.Attr.n.u1Granularity
1978 && pMixedCtx->cs.Attr.n.u1Present
1979 && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
1980 {
1981 Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
1982 pMixedCtx->cs.Attr.n.u1Granularity = 1;
1983 }
1984
1985#ifdef VBOX_STRICT
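/* Sanity check: for present segments the granularity bit must be consistent with the limit
   (G=1 implies the low 12 limit bits are all set, G=0 implies the limit fits in 20 bits). */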
1986# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
1987 AssertMsg( !pMixedCtx->reg.Attr.n.u1Present \
1988 || ( pMixedCtx->reg.Attr.n.u1Granularity \
1989 ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
1990 : pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
1991 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
1992 pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
1993
1994 HMSVM_ASSERT_SEG_GRANULARITY(cs);
1995 HMSVM_ASSERT_SEG_GRANULARITY(ss);
1996 HMSVM_ASSERT_SEG_GRANULARITY(ds);
1997 HMSVM_ASSERT_SEG_GRANULARITY(es);
1998 HMSVM_ASSERT_SEG_GRANULARITY(fs);
1999 HMSVM_ASSERT_SEG_GRANULARITY(gs);
2000
2001# undef HMSVM_ASSERT_SEG_GRANULARITY
2002#endif
2003
2004 /*
2005     * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that,
2006     * so when the CPL changes during guest execution it's possible that the SS DPL isn't updated
2007     * by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests.
2008 * See AMD spec. 15.5.1 "Basic operation".
2009 */
2010 Assert(!(pVmcb->guest.u8CPL & ~0x3));
2011 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
2012
2013 /*
2014 * Guest TR.
2015 * Fixup TR attributes so it's compatible with Intel. Important when saved-states are used
2016 * between Intel and AMD. See @bugref{6208} comment #39.
2017 */
2018 HMSVM_SAVE_SEG_REG(TR, tr);
2019 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2020 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2021
2022 /*
2023 * Guest Descriptor-Table registers.
2024 */
2025 HMSVM_SAVE_SEG_REG(LDTR, ldtr);
2026 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
2027 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base;
2028
2029 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
2030 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base;
2031
2032 /*
2033 * Guest Debug registers.
2034 */
2035 if (!pVCpu->hm.s.fUsingHyperDR7)
2036 {
2037 pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
2038 pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
2039 }
2040 else
2041 {
2042 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
2043 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
2044 }
2045
2046 /*
2047     * With Nested Paging, CR3 changes are not intercepted. Therefore, sync it now.
2048 * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
2049 */
2050 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
2051 && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
2052 {
2053 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
2054 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
2055 }
2056}
2057
2058
2059/**
2060 * Does the necessary state syncing before returning to ring-3 for any reason
2061 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2062 *
2063 * @param pVM Pointer to the VM.
2064 * @param pVCpu Pointer to the VMCPU.
2065 * @param pCtx         Pointer to the guest-CPU context.
2066 *
2067 * @remarks No-long-jmp zone!!!
2068 */
2069static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2070{
2071 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2072 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2073 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2074
2075 /*
2076 * !!! IMPORTANT !!!
2077 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2078 */
2079
2080    /* Restore host FPU state if necessary and resync on next R0 reentry. */
2081 if (CPUMIsGuestFPUStateActive(pVCpu))
2082 {
2083 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
2084 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2085 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
2086 }
2087
2088 /*
2089 * Restore host debug registers if necessary and resync on next R0 reentry.
2090 */
2091#ifdef VBOX_STRICT
2092 if (CPUMIsHyperDebugStateActive(pVCpu))
2093 {
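        /* With the hyper debug state loaded we expect every DRx access to be intercepted (see hmR0SvmLoadSharedDebugState). */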
2094 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2095 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2096 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2097 }
2098#endif
2099 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
2100 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
2101
2102 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2103 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2104
2105 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
2106 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
2107 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
2108 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
2109 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2110
2111 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
2112}
2113
2114
2115/**
2116 * Leaves the AMD-V session.
2117 *
2118 * @returns VBox status code.
2119 * @param pVM Pointer to the VM.
2120 * @param pVCpu Pointer to the VMCPU.
2121 * @param pCtx Pointer to the guest-CPU context.
2122 */
2123static int hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2124{
2125 HM_DISABLE_PREEMPT();
2126 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2127 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2128
2129 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
2130 and done this from the SVMR0ThreadCtxCallback(). */
2131 if (!pVCpu->hm.s.fLeaveDone)
2132 {
2133 hmR0SvmLeave(pVM, pVCpu, pCtx);
2134 pVCpu->hm.s.fLeaveDone = true;
2135 }
2136
2137 /*
2138 * !!! IMPORTANT !!!
2139 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2140 */
2141
2142 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
2143 /* Deregister hook now that we've left HM context before re-enabling preemption. */
2144 VMMR0ThreadCtxHookDisable(pVCpu);
2145
2146 /* Leave HM context. This takes care of local init (term). */
2147 int rc = HMR0LeaveCpu(pVCpu);
2148
2149 HM_RESTORE_PREEMPT();
2150 return rc;
2151}
2152
2153
2154/**
2155 * Does the necessary state syncing before doing a longjmp to ring-3.
2156 *
2157 * @returns VBox status code.
2158 * @param pVM Pointer to the VM.
2159 * @param pVCpu Pointer to the VMCPU.
2160 * @param pCtx Pointer to the guest-CPU context.
2161 *
2162 * @remarks No-long-jmp zone!!!
2163 */
2164static int hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2165{
2166 return hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
2167}
2168
2169
2170/**
2171 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
2172 * any remaining host state) before we longjump to ring-3 and possibly get
2173 * preempted.
2174 *
2175 * @param pVCpu Pointer to the VMCPU.
2176 * @param enmOperation The operation causing the ring-3 longjump.
2177 * @param pvUser The user argument (pointer to the possibly
2178 * out-of-date guest-CPU context).
2179 */
2180DECLCALLBACK(int) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
2181{
2182 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
2183 {
2184 /*
2185 * !!! IMPORTANT !!!
2186         * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
2187         * to be updated too. This is a stripped down version which gets out ASAP, trying not to trigger any assertions.
2188 */
2189 VMMRZCallRing3RemoveNotification(pVCpu);
2190 VMMRZCallRing3Disable(pVCpu);
2191 HM_DISABLE_PREEMPT();
2192
2193        /* Restore host FPU state if necessary and resync on next R0 reentry. */
2194 if (CPUMIsGuestFPUStateActive(pVCpu))
2195 CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
2196
2197 /* Restore host debug registers if necessary and resync on next R0 reentry. */
2198 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2199
2200 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
2201 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
2202 VMMR0ThreadCtxHookDisable(pVCpu);
2203
2204 /* Leave HM context. This takes care of local init (term). */
2205 HMR0LeaveCpu(pVCpu);
2206
2207 HM_RESTORE_PREEMPT();
2208 return VINF_SUCCESS;
2209 }
2210
2211 Assert(pVCpu);
2212 Assert(pvUser);
2213 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2214 HMSVM_ASSERT_PREEMPT_SAFE();
2215
2216 VMMRZCallRing3Disable(pVCpu);
2217 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2218
2219 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
2220 int rc = hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
2221 AssertRCReturn(rc, rc);
2222
2223 VMMRZCallRing3Enable(pVCpu);
2224 return VINF_SUCCESS;
2225}
2226
2227
2228/**
2229 * Take necessary actions before going back to ring-3.
2230 *
2231 * An action requires us to go back to ring-3. This function does the necessary
2232 * steps before we can safely return to ring-3. This is not the same as longjmps
2233 * to ring-3; this is voluntary.
2234 *
2235 * @param pVM Pointer to the VM.
2236 * @param pVCpu Pointer to the VMCPU.
2237 * @param pCtx Pointer to the guest-CPU context.
2238 * @param rcExit The reason for exiting to ring-3. Can be
2239 * VINF_VMM_UNKNOWN_RING3_CALL.
2240 */
2241static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
2242{
2243 Assert(pVM);
2244 Assert(pVCpu);
2245 Assert(pCtx);
2246 HMSVM_ASSERT_PREEMPT_SAFE();
2247
2248 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
2249 VMMRZCallRing3Disable(pVCpu);
2250 Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
2251
2252 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
2253 if (pVCpu->hm.s.Event.fPending)
2254 {
2255 hmR0SvmPendingEventToTrpmTrap(pVCpu);
2256 Assert(!pVCpu->hm.s.Event.fPending);
2257 }
2258
2259 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
2260 and if we're injecting an event we should have a TRPM trap pending. */
2261 Assert(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu));
2262 Assert(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu));
2263
2264 /* Sync. the necessary state for going back to ring-3. */
2265 hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
2266 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2267
2268 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
2269 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
2270 | CPUM_CHANGED_LDTR
2271 | CPUM_CHANGED_GDTR
2272 | CPUM_CHANGED_IDTR
2273 | CPUM_CHANGED_TR
2274 | CPUM_CHANGED_HIDDEN_SEL_REGS);
2275 if ( pVM->hm.s.fNestedPaging
2276 && CPUMIsGuestPagingEnabledEx(pCtx))
2277 {
2278 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2279 }
2280
2281 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
2282 if (rcExit != VINF_EM_RAW_INTERRUPT)
2283 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
2284
2285 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
2286
2287 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
2288 VMMRZCallRing3RemoveNotification(pVCpu);
2289 VMMRZCallRing3Enable(pVCpu);
2290}
2291
2292
2293/**
2294 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
2295 * intercepts.
2296 *
2297 * @param pVM The shared VM handle.
2298 * @param pVCpu Pointer to the VMCPU.
2299 *
2300 * @remarks No-long-jump zone!!!
2301 */
2302static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu)
2303{
2304 bool fParavirtTsc;
2305 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2306 bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
2307 if (fCanUseRealTsc)
2308 {
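        /* Let the guest execute RDTSC/RDTSCP directly; the CPU applies the VMCB TSC offset to the value the guest reads. */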
2309 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
2310 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
2311 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2312 }
2313 else
2314 {
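        /* Intercept RDTSC/RDTSCP so the TSC reads can be virtualized on each exit. */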
2315 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
2316 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
2317 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2318 }
2319 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2320
2321 /** @todo later optimize this to be done elsewhere and not before every
2322 * VM-entry. */
2323 if (fParavirtTsc)
2324 {
2325 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
2326 AssertRC(rc);
2327 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
2328 }
2329}
2330
2331
2332/**
2333 * Sets an event as a pending event to be injected into the guest.
2334 *
2335 * @param pVCpu Pointer to the VMCPU.
2336 * @param pEvent Pointer to the SVM event.
2337 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
2338 * page-fault.
2339 *
2340 * @remarks Statistics counter assumes this is a guest event being reflected to
2341 *          the guest, i.e. 'StatInjectPendingReflect' is always incremented.
2342 */
2343DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
2344{
2345 Assert(!pVCpu->hm.s.Event.fPending);
2346 Assert(pEvent->n.u1Valid);
2347
2348 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
2349 pVCpu->hm.s.Event.fPending = true;
2350 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
2351
2352 Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2353 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2354
2355 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
2356}
2357
2358
2359/**
2360 * Injects an event into the guest upon VMRUN by updating the relevant field
2361 * in the VMCB.
2362 *
2363 * @param pVCpu Pointer to the VMCPU.
2364 * @param pVmcb Pointer to the guest VM control block.
2365 * @param pCtx Pointer to the guest-CPU context.
2366 * @param pEvent Pointer to the event.
2367 *
2368 * @remarks No-long-jump zone!!!
2369 * @remarks Requires CR0!
2370 */
2371DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
2372{
2373 NOREF(pVCpu); NOREF(pCtx);
2374
2375 pVmcb->ctrl.EventInject.u = pEvent->u;
2376 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
2377
2378 Log4(("hmR0SvmInjectEventVmcb: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2379 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2380}
2381
2382
2383
2384/**
2385 * Converts any TRPM trap into a pending HM event. This is typically used when
2386 * entering from ring-3 (not longjmp returns).
2387 *
2388 * @param pVCpu Pointer to the VMCPU.
2389 */
2390static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
2391{
2392 Assert(TRPMHasTrap(pVCpu));
2393 Assert(!pVCpu->hm.s.Event.fPending);
2394
2395 uint8_t uVector;
2396 TRPMEVENT enmTrpmEvent;
2397 RTGCUINT uErrCode;
2398 RTGCUINTPTR GCPtrFaultAddress;
2399 uint8_t cbInstr;
2400
2401 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
2402 AssertRC(rc);
2403
2404 SVMEVENT Event;
2405 Event.u = 0;
2406 Event.n.u1Valid = 1;
2407 Event.n.u8Vector = uVector;
2408
2409 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
2410 if (enmTrpmEvent == TRPM_TRAP)
2411 {
2412 Event.n.u3Type = SVM_EVENT_EXCEPTION;
2413 switch (uVector)
2414 {
2415 case X86_XCPT_NMI:
2416 {
2417 Event.n.u3Type = SVM_EVENT_NMI;
2418 break;
2419 }
2420
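            /* These exceptions deliver an error code; forward it along with the event. */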
2421 case X86_XCPT_PF:
2422 case X86_XCPT_DF:
2423 case X86_XCPT_TS:
2424 case X86_XCPT_NP:
2425 case X86_XCPT_SS:
2426 case X86_XCPT_GP:
2427 case X86_XCPT_AC:
2428 {
2429 Event.n.u1ErrorCodeValid = 1;
2430 Event.n.u32ErrorCode = uErrCode;
2431 break;
2432 }
2433 }
2434 }
2435 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
2436 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2437 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
2438 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
2439 else
2440 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
2441
2442 rc = TRPMResetTrap(pVCpu);
2443 AssertRC(rc);
2444
2445 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
2446 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
2447
2448 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
2449 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
2450}
2451
2452
2453/**
2454 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
2455 * AMD-V to execute any instruction.
2456 *
2457 * @param pVCpu         Pointer to the VMCPU.
2458 */
2459static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
2460{
2461 Assert(pVCpu->hm.s.Event.fPending);
2462 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
2463
2464 SVMEVENT Event;
2465 Event.u = pVCpu->hm.s.Event.u64IntInfo;
2466
2467 uint8_t uVector = Event.n.u8Vector;
2468 uint8_t uVectorType = Event.n.u3Type;
2469
2470 TRPMEVENT enmTrapType;
2471 switch (uVectorType)
2472 {
2473 case SVM_EVENT_EXTERNAL_IRQ:
2474 enmTrapType = TRPM_HARDWARE_INT;
2475 break;
2476 case SVM_EVENT_SOFTWARE_INT:
2477 enmTrapType = TRPM_SOFTWARE_INT;
2478 break;
2479 case SVM_EVENT_EXCEPTION:
2480 case SVM_EVENT_NMI:
2481 enmTrapType = TRPM_TRAP;
2482 break;
2483 default:
2484 AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
2485 enmTrapType = TRPM_32BIT_HACK;
2486 break;
2487 }
2488
2489 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));
2490
2491 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
2492 AssertRC(rc);
2493
2494 if (Event.n.u1ErrorCodeValid)
2495 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
2496
2497 if ( uVectorType == SVM_EVENT_EXCEPTION
2498 && uVector == X86_XCPT_PF)
2499 {
2500 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
2501 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
2502 }
2503 else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
2504 {
2505 AssertMsg( uVectorType == SVM_EVENT_SOFTWARE_INT
2506 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
2507 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
2508 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
2509 }
2510 pVCpu->hm.s.Event.fPending = false;
2511}
2512
2513
2514/**
2515 * Gets the guest's interrupt-shadow.
2516 *
2517 * @returns The guest's interrupt-shadow.
2518 * @param pVCpu Pointer to the VMCPU.
2519 * @param pCtx Pointer to the guest-CPU context.
2520 *
2521 * @remarks No-long-jump zone!!!
2522 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2523 */
2524DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
2525{
2526 /*
2527 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2528 * inhibit interrupts or clear any existing interrupt-inhibition.
2529 */
2530 uint32_t uIntrState = 0;
2531 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2532 {
2533 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2534 {
2535 /*
2536             * We can clear the inhibit force flag, as even if we go back to the recompiler without executing guest code in
2537             * AMD-V, the condition for clearing the flag is met and thus the cleared state is correct.
2538 */
2539 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2540 }
2541 else
2542 uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
2543 }
2544 return uIntrState;
2545}
2546
2547
2548/**
2549 * Sets the virtual interrupt intercept control in the VMCB which
2550 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
2551 * receive interrupts.
2552 *
2553 * @param pVmcb Pointer to the VM control block.
2554 */
2555DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
2556{
2557 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
2558 {
2559 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1; /* A virtual interrupt is pending. */
2560 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
2561 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
2562 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
2563
2564 Log4(("Setting VINTR intercept\n"));
2565 }
2566}
2567
2568
2569/**
2570 * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
2571 * #VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
2572 * virtual NMIs.
2573 *
2574 * @param pVmcb Pointer to the VM control block.
2575 */
2576DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
2577{
2578 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
2579 {
2580 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
2581 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
2582
2583 Log4(("Setting IRET intercept\n"));
2584 }
2585}
2586
2587
2588/**
2589 * Clears the IRET intercept control in the VMCB.
2590 *
2591 * @param pVmcb Pointer to the VM control block.
2592 */
2593DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
2594{
2595 if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
2596 {
2597 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
2598 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
2599
2600 Log4(("Clearing IRET intercept\n"));
2601 }
2602}
2603
2604
2605/**
2606 * Evaluates the event to be delivered to the guest and sets it as the pending
2607 * event.
2608 *
2609 * @param pVCpu Pointer to the VMCPU.
2610 * @param pCtx Pointer to the guest-CPU context.
2611 */
2612static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2613{
2614 Assert(!pVCpu->hm.s.Event.fPending);
2615 Log4Func(("\n"));
2616
2617 bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
2618 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
2619 bool const fBlockNmi = RT_BOOL(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
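    /* fIntShadow: STI/MOV SS interrupt shadow; fBlockInt: EFLAGS.IF is clear; fBlockNmi: NMIs are blocked until the next IRET. */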
2620 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2621
2622 SVMEVENT Event;
2623 Event.u = 0;
2624 /** @todo SMI. SMIs take priority over NMIs. */
2625    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
2626 {
2627 if (fBlockNmi)
2628 hmR0SvmSetIretIntercept(pVmcb);
2629 else if (fIntShadow)
2630 hmR0SvmSetVirtIntrIntercept(pVmcb);
2631 else
2632 {
2633 Log4(("Pending NMI\n"));
2634
2635 Event.n.u1Valid = 1;
2636 Event.n.u8Vector = X86_XCPT_NMI;
2637 Event.n.u3Type = SVM_EVENT_NMI;
2638
2639 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2640 hmR0SvmSetIretIntercept(pVmcb);
2641 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2642 }
2643 }
2644 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
2645 {
2646 /*
2647 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
2648         * the interrupt ASAP. We must not execute any guest code until we inject the interrupt, which is why it is
2649         * evaluated here rather than set as pending based solely on the force-flags.
2650 */
2651 if ( !fBlockInt
2652 && !fIntShadow)
2653 {
2654 uint8_t u8Interrupt;
2655 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
2656 if (RT_SUCCESS(rc))
2657 {
2658 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
2659
2660 Event.n.u1Valid = 1;
2661 Event.n.u8Vector = u8Interrupt;
2662 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2663
2664 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2665 }
2666 else
2667 {
2668 /** @todo Does this actually happen? If not turn it into an assertion. */
2669 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
2670 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
2671 }
2672 }
2673 else
2674 hmR0SvmSetVirtIntrIntercept(pVmcb);
2675 }
2676}
2677
2678
2679/**
2680 * Injects any pending events into the guest if the guest is in a state to
2681 * receive them.
2682 *
2683 * @param pVCpu Pointer to the VMCPU.
2684 * @param pCtx Pointer to the guest-CPU context.
2685 */
2686static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2687{
2688 Assert(!TRPMHasTrap(pVCpu));
2689 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2690
2691 bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
2692 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
2693 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2694
2695 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
2696 {
2697 SVMEVENT Event;
2698 Event.u = pVCpu->hm.s.Event.u64IntInfo;
2699 Assert(Event.n.u1Valid);
2700#ifdef VBOX_STRICT
2701 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2702 {
2703 Assert(!fBlockInt);
2704 Assert(!fIntShadow);
2705 }
2706 else if (Event.n.u3Type == SVM_EVENT_NMI)
2707 Assert(!fIntShadow);
2708#endif
2709
2710 Log4(("Injecting pending HM event.\n"));
2711 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
2712 pVCpu->hm.s.Event.fPending = false;
2713
2714#ifdef VBOX_WITH_STATISTICS
2715 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2716 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
2717 else
2718 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
2719#endif
2720 }
2721
2722 /* Update the guest interrupt shadow in the VMCB. */
2723 pVmcb->ctrl.u64IntShadow = !!fIntShadow;
2724 NOREF(fBlockInt);
2725}
2726
2727
2728/**
2729 * Reports world-switch error and dumps some useful debug info.
2730 *
2731 * @param pVM Pointer to the VM.
2732 * @param pVCpu Pointer to the VMCPU.
2733 * @param rcVMRun The return code from VMRUN (or
2734 * VERR_SVM_INVALID_GUEST_STATE for invalid
2735 * guest-state).
2736 * @param pCtx Pointer to the guest-CPU context.
2737 */
2738static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
2739{
2740 NOREF(pCtx);
2741 HMSVM_ASSERT_PREEMPT_SAFE();
2742 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2743
2744 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
2745 {
2746 HMDumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
2747#ifdef VBOX_STRICT
2748 Log4(("ctrl.u64VmcbCleanBits %#RX64\n", pVmcb->ctrl.u64VmcbCleanBits));
2749 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
2750 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
2751 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
2752 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
2753 Log4(("ctrl.u32InterceptException %#x\n", pVmcb->ctrl.u32InterceptException));
2754 Log4(("ctrl.u32InterceptCtrl1 %#x\n", pVmcb->ctrl.u32InterceptCtrl1));
2755 Log4(("ctrl.u32InterceptCtrl2 %#x\n", pVmcb->ctrl.u32InterceptCtrl2));
2756 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
2757 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
2758 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
2759
2760 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
2761 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
2762 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
2763
2764 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
2765 Log4(("ctrl.IntCtrl.u1VIrqValid %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqValid));
2766 Log4(("ctrl.IntCtrl.u7Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved));
2767 Log4(("ctrl.IntCtrl.u4VIrqPriority %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIrqPriority));
2768 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
2769 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
2770 Log4(("ctrl.IntCtrl.u1VIrqMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
2771 Log4(("ctrl.IntCtrl.u6Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
2772 Log4(("ctrl.IntCtrl.u8VIrqVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
2773 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
2774
2775 Log4(("ctrl.u64IntShadow %#RX64\n", pVmcb->ctrl.u64IntShadow));
2776 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
2777 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
2778 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
2779 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
2780 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
2781 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
2782 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
2783 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
2784 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
2785 Log4(("ctrl.NestedPaging %#RX64\n", pVmcb->ctrl.NestedPaging.u));
2786 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
2787 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
2788 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
2789 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
2790 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
2791 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
2792
2793 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
2794 Log4(("ctrl.u64LBRVirt %#RX64\n", pVmcb->ctrl.u64LBRVirt));
2795
2796 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
2797 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
2798 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
2799 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
2800 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
2801 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
2802 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
2803 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
2804 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
2805 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
2806 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
2807 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
2808 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
2809 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
2810 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
2811 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
2812 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
2813 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
2814 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
2815 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
2816
2817 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
2818 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
2819
2820 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
2821 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
2822 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
2823 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
2824
2825 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
2826 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
2827
2828 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
2829 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
2830 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
2831 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
2832
2833 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
2834 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
2835 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
2836 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
2837 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
2838 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
2839 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
2840
2841 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
2842 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
2843 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
2844 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
2845
2846 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
2847 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
2848 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
2849
2850 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
2851 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
2852 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
2853 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
2854 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
2855 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
2856 Log4(("guest.u64GPAT %#RX64\n", pVmcb->guest.u64GPAT));
2857 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
2858 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
2859 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
2860 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
2861 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
2862#else
2863 NOREF(pVmcb);
2864#endif /* VBOX_STRICT */
2865 }
2866 else
2867 Log4(("hmR0SvmReportWorldSwitchError: rcVMRun=%d\n", rcVMRun));
2868}
2869
2870
2871/**
2872 * Check per-VM and per-VCPU force flag actions that require us to go back to
2873 * ring-3 for one reason or another.
2874 *
2875 * @returns VBox status code (information status code included).
2876 * @retval VINF_SUCCESS if we don't have any actions that require going back to
2877 * ring-3.
2878 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
2879 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
2880 * interrupts)
2881 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
2882 * all EMTs to be in ring-3.
2883 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
2884 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
2885 * to the EM loop.
2886 *
2887 * @param pVM Pointer to the VM.
2888 * @param pVCpu Pointer to the VMCPU.
2889 * @param pCtx Pointer to the guest-CPU context.
2890 */
2891static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2892{
2893 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2894
2895 /* On AMD-V we don't need to update CR3, PAE PDPES lazily. See hmR0SvmSaveGuestState(). */
2896 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
2897 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
2898
2899 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
2900 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2901 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
2902 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2903 {
2904        /* Pending PGM CR3 sync. */
2905        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2906 {
2907 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2908 if (rc != VINF_SUCCESS)
2909 {
2910 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
2911 return rc;
2912 }
2913 }
2914
2915 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
2916 /* -XXX- what was that about single stepping? */
2917 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
2918 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2919 {
2920 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
2921 int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
2922 Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
2923 return rc;
2924 }
2925
2926 /* Pending VM request packets, such as hardware interrupts. */
2927 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
2928 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2929 {
2930 Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
2931 return VINF_EM_PENDING_REQUEST;
2932 }
2933
2934 /* Pending PGM pool flushes. */
2935 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
2936 {
2937 Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
2938 return VINF_PGM_POOL_FLUSH_PENDING;
2939 }
2940
2941 /* Pending DMA requests. */
2942 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
2943 {
2944 Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
2945 return VINF_EM_RAW_TO_R3;
2946 }
2947 }
2948
2949 return VINF_SUCCESS;
2950}
2951
2952
2953/**
2954 * Does the preparations before executing guest code in AMD-V.
2955 *
2956 * This may cause longjmps to ring-3 and may even result in rescheduling to the
2957 * recompiler. We must be cautious about what we do here regarding committing
2958 * guest-state information into the VMCB, assuming we assuredly execute the
2959 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
2960 * clearing the common-state (TRPM/forceflags), we must undo those changes so
2961 * that the recompiler can (and should) use them when it resumes guest
2962 * execution. Otherwise such operations must be done when we can no longer
2963 * exit to ring-3.
2964 *
2965 * @returns VBox status code (informational status codes included).
2966 * @retval VINF_SUCCESS if we can proceed with running the guest.
2967 * @retval VINF_* scheduling changes, we have to go back to ring-3.
2968 *
2969 * @param pVM Pointer to the VM.
2970 * @param pVCpu Pointer to the VMCPU.
2971 * @param pCtx Pointer to the guest-CPU context.
2972 * @param pSvmTransient Pointer to the SVM transient structure.
2973 */
2974static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
2975{
2976 HMSVM_ASSERT_PREEMPT_SAFE();
2977
2978 /* Check force flag actions that might require us to go back to ring-3. */
2979 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
2980 if (rc != VINF_SUCCESS)
2981 return rc;
2982
2983 if (TRPMHasTrap(pVCpu))
2984 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
2985 else if (!pVCpu->hm.s.Event.fPending)
2986 hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
2987
2988#ifdef HMSVM_SYNC_FULL_GUEST_STATE
2989 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
2990#endif
2991
2992 /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
2993 rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
2994 AssertRCReturn(rc, rc);
2995 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
2996
2997 /*
2998 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
2999 * so we can update it on the way back if the guest changed the TPR.
3000 */
3001 if (pVCpu->hm.s.svm.fSyncVTpr)
3002 {
3003 if (pVM->hm.s.fTPRPatchingActive)
3004 pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
3005 else
3006 {
3007 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3008 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
3009 }
3010 }
3011
3012 /*
3013 * No longjmps to ring-3 from this point on!!!
3014 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
3015 * This also disables flushing of the R0-logger instance (if any).
3016 */
3017 VMMRZCallRing3Disable(pVCpu);
3018
3019 /*
3020 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
3021 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
3022 *
3023     * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
3024 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
3025 *
3026 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
3027 * executing guest code.
3028 */
3029 pSvmTransient->fEFlags = ASMIntDisableFlags();
3030 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
3031 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3032 {
3033 ASMSetFlags(pSvmTransient->fEFlags);
3034 VMMRZCallRing3Enable(pVCpu);
3035 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3036 return VINF_EM_RAW_TO_R3;
3037 }
3038 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
3039 {
3040 ASMSetFlags(pSvmTransient->fEFlags);
3041 VMMRZCallRing3Enable(pVCpu);
3042 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
3043 return VINF_EM_RAW_INTERRUPT;
3044 }
3045
3046 /*
3047 * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
3048 * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
3049 * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
3050 *
3051 * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
3052 * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
3053 */
3054 if (pVCpu->hm.s.Event.fPending)
3055 {
3056 SVMEVENT Event;
3057 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3058 if ( Event.n.u1Valid
3059 && Event.n.u3Type == SVM_EVENT_NMI
3060 && Event.n.u8Vector == X86_XCPT_NMI
3061 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3062 {
3063 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3064 }
3065 }
3066
3067 return VINF_SUCCESS;
3068}
3069
3070
3071/**
3072 * Prepares to run guest code in AMD-V and we've committed to doing so. This
3073 * means there is no backing out to ring-3 or anywhere else at this
3074 * point.
3075 *
3076 * @param pVM Pointer to the VM.
3077 * @param pVCpu Pointer to the VMCPU.
3078 * @param pCtx Pointer to the guest-CPU context.
3079 * @param pSvmTransient Pointer to the SVM transient structure.
3080 *
3081 * @remarks Called with preemption disabled.
3082 * @remarks No-long-jump zone!!!
3083 */
3084static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3085{
3086 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3087 Assert(VMMR0IsLogFlushDisabled(pVCpu));
3088 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3089
3090 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
3091 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
3092
3093 hmR0SvmInjectPendingEvent(pVCpu, pCtx);
3094
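 /* If configured to pre-load the guest FPU state, do it now; loading it affects how guest CR0.TS/MP must be shadowed, which is presumably why HM_CHANGED_GUEST_CR0 is flagged below. */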
3095 if ( pVCpu->hm.s.fPreloadGuestFpu
3096 && !CPUMIsGuestFPUStateActive(pVCpu))
3097 {
3098 CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
3099 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
3100 }
3101
3102 /* Load the state shared between host and guest (FPU, debug). */
3103 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3104 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
3105 hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
3106 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); /* Preemption might set this, nothing to do on AMD-V. */
3107 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
3108
3109 /* Setup TSC offsetting. */
3110 RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
3111 if ( pSvmTransient->fUpdateTscOffsetting
3112 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
3113 {
3114 hmR0SvmUpdateTscOffsetting(pVM, pVCpu);
3115 pSvmTransient->fUpdateTscOffsetting = false;
3116 }
3117
3118 /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
3119 if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
3120 pVmcb->ctrl.u64VmcbCleanBits = 0;
3121
3122 /* Store status of the shared guest-host state at the time of VMRUN. */
3123#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3124 if (CPUMIsGuestInLongModeEx(pCtx))
3125 {
3126 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
3127 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
3128 }
3129 else
3130#endif
3131 {
3132 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
3133 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
3134 }
3135 pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
3136
3137 /* Flush the appropriate tagged-TLB entries. */
3138 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
3139 hmR0SvmFlushTaggedTlb(pVCpu);
3140 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
3141
3142 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
3143
3144 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
3145 to start executing. */
3146
3147 /*
3148 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
3149 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
3150 *
3151 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
3152 */
3153 if ( (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
3154 && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
3155 {
3156 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
3157 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
3158 uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
3159 if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
3160 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
3161 pSvmTransient->fRestoreTscAuxMsr = true;
3162 }
3163 else
3164 {
3165 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
3166 pSvmTransient->fRestoreTscAuxMsr = false;
3167 }
3168
3169 /* If VMCB Clean bits aren't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
3170 if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
3171 pVmcb->ctrl.u64VmcbCleanBits = 0;
3172}
3173
3174
3175/**
3176 * Wrapper for running the guest code in AMD-V.
3177 *
3178 * @returns VBox strict status code.
3179 * @param pVM Pointer to the VM.
3180 * @param pVCpu Pointer to the VMCPU.
3181 * @param pCtx Pointer to the guest-CPU context.
3182 *
3183 * @remarks No-long-jump zone!!!
3184 */
3185DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3186{
3187 /*
3188 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3189 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
3190 * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
3191 */
3192#ifdef VBOX_WITH_KERNEL_USING_XMM
3193 return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
3194 pVCpu->hm.s.svm.pfnVMRun);
3195#else
3196 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
3197#endif
3198}
3199
3200
3201/**
3202 * Performs some essential restoration of state after running guest code in
3203 * AMD-V.
3204 *
3205 * @param pVM Pointer to the VM.
3206 * @param pVCpu Pointer to the VMCPU.
3207 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3208 * out-of-sync. Make sure to update the required fields
3209 * before using them.
3210 * @param pSvmTransient Pointer to the SVM transient structure.
3211 * @param rcVMRun Return code of VMRUN.
3212 *
3213 * @remarks Called with interrupts disabled.
3214 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
3215 * unconditionally when it is safe to do so.
3216 */
3217static void hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
3218{
3219 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3220
3221 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
3222 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
3223
3224 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3225 pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
3226
3227 if (pSvmTransient->fRestoreTscAuxMsr)
3228 {
3229 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
3230 CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
3231 if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
3232 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
3233 }
3234
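 /* If RDTSC wasn't intercepted, the guest has been reading host-TSC + VMCB offset directly; tell TM the last TSC value the guest could have seen. */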
3235 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
3236 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
3237
3238 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
3239 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
3240 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
3241
3242 Assert(!(ASMGetFlags() & X86_EFL_IF));
3243 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
3244 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
3245
3246 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
3247 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
3248 {
3249 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
3250 return;
3251 }
3252
3253 pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode; /* Save the #VMEXIT reason. */
3254 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcb->ctrl.u64ExitCode); /* Update the #VMEXIT history array. */
3255 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
3256 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
3257
3258 hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
3259
3260 if (RT_LIKELY(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID))
3261 {
3262 if (pVCpu->hm.s.svm.fSyncVTpr)
3263 {
3264 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
3265 if ( pVM->hm.s.fTPRPatchingActive
3266 && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
3267 {
3268 int rc = PDMApicSetTPR(pVCpu, pMixedCtx->msrLSTAR & 0xff);
3269 AssertRC(rc);
3270 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3271 }
3272 else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
3273 {
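 /* The VMCB VTPR field corresponds to CR8 (TPR bits 7:4), so shift left by 4 to form the 8-bit APIC TPR value. */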
3274 int rc = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
3275 AssertRC(rc);
3276 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3277 }
3278 }
3279 }
3280}
3281
3282
3283/**
3284 * Runs the guest code using AMD-V.
3285 *
3286 * @returns VBox status code.
3287 * @param pVM Pointer to the VM.
3288 * @param pVCpu Pointer to the VMCPU.
3289 */
3290static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3291{
3292 SVMTRANSIENT SvmTransient;
3293 SvmTransient.fUpdateTscOffsetting = true;
3294 uint32_t cLoops = 0;
3295 int rc = VERR_INTERNAL_ERROR_5;
3296
3297 for (;; cLoops++)
3298 {
3299 Assert(!HMR0SuspendPending());
3300 HMSVM_ASSERT_CPU_SAFE();
3301
3302 /* Preparatory work for running guest code, this may force us to return
3303 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
3304 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
3305 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
3306 if (rc != VINF_SUCCESS)
3307 break;
3308
3309 /*
3310 * No longjmps to ring-3 from this point on!!!
3311 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
3312 * This also disables flushing of the R0-logger instance (if any).
3313 */
3314 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
3315 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
3316
3317 /* Restore any residual host-state and save any bits shared between host
3318 and guest into the guest-CPU state. Re-enables interrupts! */
3319 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
3320
3321 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
3322 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3323 {
3324 if (rc == VINF_SUCCESS)
3325 rc = VERR_SVM_INVALID_GUEST_STATE;
3326 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
3327 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
3328 break;
3329 }
3330
3331 /* Handle the #VMEXIT. */
3332 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
3333 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3334 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb);
3335 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
3336 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
3337 if (rc != VINF_SUCCESS)
3338 break;
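 /* Force a trip to ring-3 after cMaxResumeLoops iterations so the rest of the VMM gets a chance to do pending work. */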
3339 if (cLoops > pVM->hm.s.cMaxResumeLoops)
3340 {
3341 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
3342 rc = VINF_EM_RAW_INTERRUPT;
3343 break;
3344 }
3345 }
3346
3347 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
3348 return rc;
3349}
3350
3351
3352/**
3353 * Runs the guest code using AMD-V in single step mode.
3354 *
3355 * @returns VBox status code.
3356 * @param pVM Pointer to the VM.
3357 * @param pVCpu Pointer to the VMCPU.
3358 * @param pCtx Pointer to the guest-CPU context.
3359 */
3360static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3361{
3362 SVMTRANSIENT SvmTransient;
3363 SvmTransient.fUpdateTscOffsetting = true;
3364 uint32_t cLoops = 0;
3365 int rc = VERR_INTERNAL_ERROR_5;
3366 uint16_t uCsStart = pCtx->cs.Sel;
3367 uint64_t uRipStart = pCtx->rip;
3368
3369 for (;; cLoops++)
3370 {
3371 Assert(!HMR0SuspendPending());
3372 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
3373 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
3374 (unsigned)RTMpCpuId(), cLoops));
3375
3376 /* Preparatory work for running guest code, this may force us to return
3377 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
3378 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
3379 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
3380 if (rc != VINF_SUCCESS)
3381 break;
3382
3383 /*
3384 * No longjmps to ring-3 from this point on!!!
3385 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
3386 * This also disables flushing of the R0-logger instance (if any).
3387 */
3388 VMMRZCallRing3Disable(pVCpu);
3389 VMMRZCallRing3RemoveNotification(pVCpu);
3390 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
3391
3392 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
3393
3394 /*
3395 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
3396 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
3397 */
3398 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
3399 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
3400 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3401 {
3402 if (rc == VINF_SUCCESS)
3403 rc = VERR_SVM_INVALID_GUEST_STATE;
3404 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
3405 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
3406 return rc;
3407 }
3408
3409 /* Handle the #VMEXIT. */
3410 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
3411 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3412 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb);
3413 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
3414 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
3415 if (rc != VINF_SUCCESS)
3416 break;
3417 if (cLoops > pVM->hm.s.cMaxResumeLoops)
3418 {
3419 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
3420 rc = VINF_EM_RAW_INTERRUPT;
3421 break;
3422 }
3423
3424 /*
3425 * Did the RIP change? If so, consider it a single step.
3426 * Otherwise, make sure one of the TFs gets set.
3427 */
3428 if ( pCtx->rip != uRipStart
3429 || pCtx->cs.Sel != uCsStart)
3430 {
3431 rc = VINF_EM_DBG_STEPPED;
3432 break;
3433 }
3434 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
3435 }
3436
3437 /*
3438 * Clear the X86_EFL_TF if necessary.
3439 */
3440 if (pVCpu->hm.s.fClearTrapFlag)
3441 {
3442 pVCpu->hm.s.fClearTrapFlag = false;
3443 pCtx->eflags.Bits.u1TF = 0;
3444 }
3445
3446 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
3447 return rc;
3448}
3449
3450
3451/**
3452 * Runs the guest code using AMD-V.
3453 *
3454 * @returns VBox status code.
3455 * @param pVM Pointer to the VM.
3456 * @param pVCpu Pointer to the VMCPU.
3457 * @param pCtx Pointer to the guest-CPU context.
3458 */
3459VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3460{
3461 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3462 HMSVM_ASSERT_PREEMPT_SAFE();
3463 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
3464
3465 int rc;
3466 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
3467 rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx);
3468 else
3469 rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx);
3470
3471 if (rc == VERR_EM_INTERPRETER)
3472 rc = VINF_EM_RAW_EMULATE_INSTR;
3473 else if (rc == VINF_EM_RESET)
3474 rc = VINF_EM_TRIPLE_FAULT;
3475
3476 /* Prepare to return to ring-3. This will remove longjmp notifications. */
3477 hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
3478 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
3479 return rc;
3480}
3481
3482
3483/**
3484 * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
3485 *
3486 * @returns VBox status code (informational status codes included).
3487 * @param pVCpu Pointer to the VMCPU.
3488 * @param pCtx Pointer to the guest-CPU context.
3489 * @param pSvmTransient Pointer to the SVM transient structure.
3490 */
3491DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3492{
3493 Assert(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID);
3494 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
3495
3496 /*
3497 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
3498 * normal workloads (for some definition of "normal").
3499 */
3500 uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
3501 switch (pSvmTransient->u64ExitCode)
3502 {
3503 case SVM_EXIT_NPF:
3504 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
3505
3506 case SVM_EXIT_IOIO:
3507 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
3508
3509 case SVM_EXIT_RDTSC:
3510 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
3511
3512 case SVM_EXIT_RDTSCP:
3513 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
3514
3515 case SVM_EXIT_CPUID:
3516 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
3517
3518 case SVM_EXIT_EXCEPTION_E: /* X86_XCPT_PF */
3519 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
3520
3521 case SVM_EXIT_EXCEPTION_7: /* X86_XCPT_NM */
3522 return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
3523
3524 case SVM_EXIT_EXCEPTION_6: /* X86_XCPT_UD */
3525 return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
3526
3527 case SVM_EXIT_EXCEPTION_10: /* X86_XCPT_MF */
3528 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
3529
3530 case SVM_EXIT_EXCEPTION_1: /* X86_XCPT_DB */
3531 return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
3532
3533 case SVM_EXIT_MONITOR:
3534 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
3535
3536 case SVM_EXIT_MWAIT:
3537 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
3538
3539 case SVM_EXIT_HLT:
3540 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
3541
3542 case SVM_EXIT_READ_CR0:
3543 case SVM_EXIT_READ_CR3:
3544 case SVM_EXIT_READ_CR4:
3545 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
3546
3547 case SVM_EXIT_WRITE_CR0:
3548 case SVM_EXIT_WRITE_CR3:
3549 case SVM_EXIT_WRITE_CR4:
3550 case SVM_EXIT_WRITE_CR8:
3551 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
3552
3553 case SVM_EXIT_VMMCALL:
3554 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
3555
3556 case SVM_EXIT_VINTR:
3557 return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
3558
3559 case SVM_EXIT_INTR:
3560 case SVM_EXIT_FERR_FREEZE:
3561 case SVM_EXIT_NMI:
3562 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
3563
3564 case SVM_EXIT_MSR:
3565 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
3566
3567 case SVM_EXIT_INVLPG:
3568 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
3569
3570 case SVM_EXIT_WBINVD:
3571 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
3572
3573 case SVM_EXIT_INVD:
3574 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
3575
3576 case SVM_EXIT_RDPMC:
3577 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
3578
3579 default:
3580 {
3581 switch (pSvmTransient->u64ExitCode)
3582 {
3583 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
3584 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
3585 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
3586 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
3587 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
3588
3589 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
3590 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
3591 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
3592 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
3593 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
3594
3595 case SVM_EXIT_XSETBV:
3596 return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
3597
3598 case SVM_EXIT_TASK_SWITCH:
3599 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
3600
3601 case SVM_EXIT_IRET:
3602 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
3603
3604 case SVM_EXIT_SHUTDOWN:
3605 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
3606
3607 case SVM_EXIT_SMI:
3608 case SVM_EXIT_INIT:
3609 {
3610 /*
3611 * We don't intercept SMIs. As for INIT signals, it really shouldn't ever happen here. If it ever does,
3612 * we want to know about it so log the exit code and bail.
3613 */
3614 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
3615 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
3616 return VERR_SVM_UNEXPECTED_EXIT;
3617 }
3618
3619 case SVM_EXIT_INVLPGA:
3620 case SVM_EXIT_RSM:
3621 case SVM_EXIT_VMRUN:
3622 case SVM_EXIT_VMLOAD:
3623 case SVM_EXIT_VMSAVE:
3624 case SVM_EXIT_STGI:
3625 case SVM_EXIT_CLGI:
3626 case SVM_EXIT_SKINIT:
3627 return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
3628
3629#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
3630 case SVM_EXIT_EXCEPTION_0: /* X86_XCPT_DE */
3631 /* SVM_EXIT_EXCEPTION_1: */ /* X86_XCPT_DB - Handled above. */
3632 case SVM_EXIT_EXCEPTION_2: /* X86_XCPT_NMI */
3633 case SVM_EXIT_EXCEPTION_3: /* X86_XCPT_BP */
3634 case SVM_EXIT_EXCEPTION_4: /* X86_XCPT_OF */
3635 case SVM_EXIT_EXCEPTION_5: /* X86_XCPT_BR */
3636 /* case SVM_EXIT_EXCEPTION_6: */ /* X86_XCPT_UD - Handled above. */
3637 /* SVM_EXIT_EXCEPTION_7: */ /* X86_XCPT_NM - Handled above. */
3638 case SVM_EXIT_EXCEPTION_8: /* X86_XCPT_DF */
3639 case SVM_EXIT_EXCEPTION_9: /* X86_XCPT_CO_SEG_OVERRUN */
3640 case SVM_EXIT_EXCEPTION_A: /* X86_XCPT_TS */
3641 case SVM_EXIT_EXCEPTION_B: /* X86_XCPT_NP */
3642 case SVM_EXIT_EXCEPTION_C: /* X86_XCPT_SS */
3643 case SVM_EXIT_EXCEPTION_D: /* X86_XCPT_GP */
3644 /* SVM_EXIT_EXCEPTION_E: */ /* X86_XCPT_PF - Handled above. */
3645 /* SVM_EXIT_EXCEPTION_10: */ /* X86_XCPT_MF - Handled above. */
3646 case SVM_EXIT_EXCEPTION_11: /* X86_XCPT_AC */
3647 case SVM_EXIT_EXCEPTION_12: /* X86_XCPT_MC */
3648 case SVM_EXIT_EXCEPTION_13: /* X86_XCPT_XF */
3649 case SVM_EXIT_EXCEPTION_F: /* Reserved */
3650 case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16:
3651 case SVM_EXIT_EXCEPTION_17: case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19:
3652 case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B: case SVM_EXIT_EXCEPTION_1C:
3653 case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
3654 {
3655 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3656 SVMEVENT Event;
3657 Event.u = 0;
3658 Event.n.u1Valid = 1;
3659 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3660 Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
3661
3662 switch (Event.n.u8Vector)
3663 {
3664 case X86_XCPT_DE:
3665 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
3666 break;
3667
3668 case X86_XCPT_BP:
3669 /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
3670 * next instruction. */
3671 /** @todo Investigate this later. */
3672 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
3673 break;
3674
3675 case X86_XCPT_NP:
3676 Event.n.u1ErrorCodeValid = 1;
3677 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3678 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
3679 break;
3680
3681 case X86_XCPT_SS:
3682 Event.n.u1ErrorCodeValid = 1;
3683 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3684 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
3685 break;
3686
3687 case X86_XCPT_GP:
3688 Event.n.u1ErrorCodeValid = 1;
3689 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3690 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
3691 break;
3692
3693 default:
3694 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit caused by exception %#x\n", Event.n.u8Vector));
3695 pVCpu->hm.s.u32HMError = Event.n.u8Vector;
3696 return VERR_SVM_UNEXPECTED_XCPT_EXIT;
3697 }
3698
3699 Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3700 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3701 return VINF_SUCCESS;
3702 }
3703#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
3704
3705 default:
3706 {
3707 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#x\n", u32ExitCode));
3708 pVCpu->hm.s.u32HMError = u32ExitCode;
3709 return VERR_SVM_UNKNOWN_EXIT;
3710 }
3711 }
3712 }
3713 }
3714 return VERR_INTERNAL_ERROR_5; /* Should never happen. */
3715}
3716
3717
3718#ifdef DEBUG
3719 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
3720# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
3721 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
3722
3723# define HMSVM_ASSERT_PREEMPT_CPUID() \
3724 do \
3725 { \
3726 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
3727 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
3728 } while (0)
3729
3730# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
3731 do { \
3732 AssertPtr(pVCpu); \
3733 AssertPtr(pCtx); \
3734 AssertPtr(pSvmTransient); \
3735 Assert(ASMIntAreEnabled()); \
3736 HMSVM_ASSERT_PREEMPT_SAFE(); \
3737 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
3738 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
3739 HMSVM_ASSERT_PREEMPT_SAFE(); \
3740 if (VMMR0IsLogFlushDisabled(pVCpu)) \
3741 HMSVM_ASSERT_PREEMPT_CPUID(); \
3742 } while (0)
3743#else /* Release builds */
3744# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { NOREF(pVCpu); NOREF(pCtx); NOREF(pSvmTransient); } while (0)
3745#endif
3746
3747
3748/**
3749 * Worker for hmR0SvmInterpretInvlpg().
3750 *
3751 * @return VBox status code.
3752 * @param pVCpu Pointer to the VMCPU.
3753 * @param pCpu Pointer to the disassembler state.
3754 * @param pCtx The guest CPU context.
3755 */
3756static int hmR0SvmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTX pCtx)
3757{
3758 DISQPVPARAMVAL Param1;
3759 RTGCPTR GCPtrPage;
3760
3761 int rc = DISQueryParamVal(CPUMCTX2CORE(pCtx), pCpu, &pCpu->Param1, &Param1, DISQPVWHICH_SRC);
3762 if (RT_FAILURE(rc))
3763 return VERR_EM_INTERPRETER;
3764
3765 if ( Param1.type == DISQPV_TYPE_IMMEDIATE
3766 || Param1.type == DISQPV_TYPE_ADDRESS)
3767 {
3768 if (!(Param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64)))
3769 return VERR_EM_INTERPRETER;
3770
3771 GCPtrPage = Param1.val.val64;
3772 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), GCPtrPage);
3773 rc = VBOXSTRICTRC_VAL(rc2);
3774 }
3775 else
3776 {
3777 Log4(("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type));
3778 rc = VERR_EM_INTERPRETER;
3779 }
3780
3781 return rc;
3782}
3783
3784
3785/**
3786 * Interprets INVLPG.
3787 *
3788 * @returns VBox status code.
3789 * @retval VINF_* Scheduling instructions.
3790 * @retval VERR_EM_INTERPRETER Something we can't cope with.
3791 * @retval VERR_* Fatal errors.
3792 *
3793 * @param pVM Pointer to the VM.
3794 * @param pCtx The guest CPU context.
3795 *
3796 * @remarks Updates the RIP if the instruction was executed successfully.
3797 */
3798static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3799{
3800 /* Only allow 32 & 64 bit code. */
3801 if (CPUMGetGuestCodeBits(pVCpu) != 16)
3802 {
3803 PDISSTATE pDis = &pVCpu->hm.s.DisState;
3804 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
3805 if ( RT_SUCCESS(rc)
3806 && pDis->pCurInstr->uOpcode == OP_INVLPG)
3807 {
3808 rc = hmR0SvmInterpretInvlPgEx(pVCpu, pDis, pCtx);
3809 if (RT_SUCCESS(rc))
3810 pCtx->rip += pDis->cbInstr;
3811 return rc;
3812 }
3813 else
3814 Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
3815 }
3816 return VERR_EM_INTERPRETER;
3817}
3818
3819
3820/**
3821 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
3822 *
3823 * @param pVCpu Pointer to the VMCPU.
3824 */
3825DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu)
3826{
3827 SVMEVENT Event;
3828 Event.u = 0;
3829 Event.n.u1Valid = 1;
3830 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3831 Event.n.u8Vector = X86_XCPT_UD;
3832 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3833}
3834
3835
3836/**
3837 * Sets a debug (#DB) exception as pending-for-injection into the VM.
3838 *
3839 * @param pVCpu Pointer to the VMCPU.
3840 */
3841DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPU pVCpu)
3842{
3843 SVMEVENT Event;
3844 Event.u = 0;
3845 Event.n.u1Valid = 1;
3846 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3847 Event.n.u8Vector = X86_XCPT_DB;
3848 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3849}
3850
3851
3852/**
3853 * Sets a page fault (#PF) exception as pending-for-injection into the VM.
3854 *
3855 * @param pVCpu Pointer to the VMCPU.
3856 * @param pCtx Pointer to the guest-CPU context.
3857 * @param u32ErrCode The error-code for the page-fault.
3858 * @param uFaultAddress The page fault address (CR2).
3859 *
3860 * @remarks This updates the guest CR2 with @a uFaultAddress!
3861 */
3862DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3863{
3864 SVMEVENT Event;
3865 Event.u = 0;
3866 Event.n.u1Valid = 1;
3867 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3868 Event.n.u8Vector = X86_XCPT_PF;
3869 Event.n.u1ErrorCodeValid = 1;
3870 Event.n.u32ErrorCode = u32ErrCode;
3871
3872 /* Update CR2 of the guest. */
3873 if (pCtx->cr2 != uFaultAddress)
3874 {
3875 pCtx->cr2 = uFaultAddress;
3876 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2);
3877 }
3878
3879 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3880}
3881
3882
3883/**
3884 * Sets a device-not-available (#NM) exception as pending-for-injection into the
3885 * VM.
3886 *
3887 * @param pVCpu Pointer to the VMCPU.
3888 */
3889DECLINLINE(void) hmR0SvmSetPendingXcptNM(PVMCPU pVCpu)
3890{
3891 SVMEVENT Event;
3892 Event.u = 0;
3893 Event.n.u1Valid = 1;
3894 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3895 Event.n.u8Vector = X86_XCPT_NM;
3896 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3897}
3898
3899
3900/**
3901 * Sets a math-fault (#MF) exception as pending-for-injection into the VM.
3902 *
3903 * @param pVCpu Pointer to the VMCPU.
3904 */
3905DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPU pVCpu)
3906{
3907 SVMEVENT Event;
3908 Event.u = 0;
3909 Event.n.u1Valid = 1;
3910 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3911 Event.n.u8Vector = X86_XCPT_MF;
3912 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3913}
3914
3915
3916/**
3917 * Sets a double fault (#DF) exception as pending-for-injection into the VM.
3918 *
3919 * @param pVCpu Pointer to the VMCPU.
3920 */
3921DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
3922{
3923 SVMEVENT Event;
3924 Event.u = 0;
3925 Event.n.u1Valid = 1;
3926 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3927 Event.n.u8Vector = X86_XCPT_DF;
3928 Event.n.u1ErrorCodeValid = 1;
3929 Event.n.u32ErrorCode = 0;
3930 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3931}
3932
3933
3934/**
3935 * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
3936 * guests. This simply looks up the patch record at EIP and does the required emulation.
3937 *
3938 * This VMMCALL is used as a fallback mechanism when the MOV to/from CR8 isn't exactly
3939 * like how we want it to be (e.g. not followed by SHR 4 as is usually done for the
3940 * TPR). See hmR3ReplaceTprInstr() for the details.
3941 *
3942 * @returns VBox status code.
3943 * @retval VINF_SUCCESS if the access was handled successfully.
3944 * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
3945 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
3946 *
3947 * @param pVM Pointer to the VM.
3948 * @param pVCpu Pointer to the VMCPU.
3949 * @param pCtx Pointer to the guest-CPU context.
3950 */
3951static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3952{
3953 Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
3954
3955 /*
3956 * We do this in a loop as we increment the RIP after a successful emulation
3957 * and the new RIP may be a patched instruction which needs emulation as well.
3958 */
3959 bool fPatchFound = false;
3960 for (;;)
3961 {
3962 bool fPending;
3963 uint8_t u8Tpr;
3964
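 /* Look up the patch record keyed by the current guest EIP; if there is none, we're done emulating. */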
3965 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
3966 if (!pPatch)
3967 break;
3968
3969 fPatchFound = true;
3970 switch (pPatch->enmType)
3971 {
3972 case HMTPRINSTR_READ:
3973 {
3974 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
3975 AssertRC(rc);
3976
3977 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
3978 AssertRC(rc);
3979 pCtx->rip += pPatch->cbOp;
3980 break;
3981 }
3982
3983 case HMTPRINSTR_WRITE_REG:
3984 case HMTPRINSTR_WRITE_IMM:
3985 {
3986 if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
3987 {
3988 uint32_t u32Val;
3989 int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
3990 AssertRC(rc);
3991 u8Tpr = u32Val;
3992 }
3993 else
3994 u8Tpr = (uint8_t)pPatch->uSrcOperand;
3995
3996 int rc2 = PDMApicSetTPR(pVCpu, u8Tpr);
3997 AssertRC(rc2);
3998 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3999
4000 pCtx->rip += pPatch->cbOp;
4001 break;
4002 }
4003
4004 default:
4005 AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
4006 pVCpu->hm.s.u32HMError = pPatch->enmType;
4007 return VERR_SVM_UNEXPECTED_PATCH_TYPE;
4008 }
4009 }
4010
4011 if (fPatchFound)
4012 return VINF_SUCCESS;
4013 return VERR_NOT_FOUND;
4014}
4015
4016
4017/**
4018 * Determines if an exception is a contributory exception.
4019 *
4020 * Contributory exceptions are ones which can cause double-faults unless the
4021 * original exception was a benign exception. Page-fault is intentionally not
4022 * included here as it's a conditional contributory exception.
4023 *
4024 * @returns true if the exception is contributory, false otherwise.
4025 * @param uVector The exception vector.
4026 */
4027DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)
4028{
4029 switch (uVector)
4030 {
4031 case X86_XCPT_GP:
4032 case X86_XCPT_SS:
4033 case X86_XCPT_NP:
4034 case X86_XCPT_TS:
4035 case X86_XCPT_DE:
4036 return true;
4037 default:
4038 break;
4039 }
4040 return false;
4041}
4042
4043
4044/**
4045 * Handle a condition that occurred while delivering an event through the guest
4046 * IDT.
4047 *
4048 * @returns VBox status code (informational error codes included).
4049 * @retval VINF_SUCCESS if we should continue handling the #VMEXIT.
4050 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4051 * continue execution of the guest which will deliver the #DF.
4052 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4053 *
4054 * @param pVCpu Pointer to the VMCPU.
4055 * @param pCtx Pointer to the guest-CPU context.
4056 * @param pSvmTransient Pointer to the SVM transient structure.
4057 *
4058 * @remarks No-long-jump zone!!!
4059 */
4060static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4061{
4062 int rc = VINF_SUCCESS;
4063 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4064
4065 /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". The EXITINTINFO (if valid) contains the prior event (IDT vector)
4066 * that was being delivered to the guest when the intercepted #VMEXIT (exit vector) occurred. */
4067 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
4068 {
4069 uint8_t uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
4070
4071 typedef enum
4072 {
4073 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4074 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4075 SVMREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4076 SVMREFLECTXCPT_NONE /* Nothing to reflect. */
4077 } SVMREFLECTXCPT;
4078
4079 SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
4080 bool fReflectingNmi = false;
4081 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
4082 {
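 /* The original event was an exception; check whether this #VMEXIT is itself an exception intercept. */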
4083 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F - SVM_EXIT_EXCEPTION_0)
4084 {
4085 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
4086
4087#ifdef VBOX_STRICT
4088 if ( hmR0SvmIsContributoryXcpt(uIdtVector)
4089 && uExitVector == X86_XCPT_PF)
4090 {
4091 Log4(("IDT: Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pCtx->cr2));
4092 }
4093#endif
4094 if ( uExitVector == X86_XCPT_PF
4095 && uIdtVector == X86_XCPT_PF)
4096 {
4097 pSvmTransient->fVectoringDoublePF = true;
4098 Log4(("IDT: Vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));
4099 }
4100 else if ( (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
4101 && hmR0SvmIsContributoryXcpt(uExitVector)
4102 && ( hmR0SvmIsContributoryXcpt(uIdtVector)
4103 || uIdtVector == X86_XCPT_PF))
4104 {
4105 enmReflect = SVMREFLECTXCPT_DF;
4106 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
4107 uIdtVector, uExitVector));
4108 }
4109 else if (uIdtVector == X86_XCPT_DF)
4110 {
4111 enmReflect = SVMREFLECTXCPT_TF;
4112 Log4(("IDT: Pending vectoring triple-fault %#RX64 uIdtVector=%#x uExitVector=%#x\n",
4113 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
4114 }
4115 else
4116 enmReflect = SVMREFLECTXCPT_XCPT;
4117 }
4118 else
4119 {
4120 /*
4121 * If event delivery caused a #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
4122 * exception to the guest after handling the #VMEXIT.
4123 */
4124 enmReflect = SVMREFLECTXCPT_XCPT;
4125 }
4126 }
4127 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
4128 || pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
4129 {
4130 enmReflect = SVMREFLECTXCPT_XCPT;
4131 fReflectingNmi = RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI);
4132
4133 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F - SVM_EXIT_EXCEPTION_0)
4134 {
4135 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
4136 if (uExitVector == X86_XCPT_PF)
4137 {
4138 pSvmTransient->fVectoringPF = true;
4139 Log4(("IDT: Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));
4140 }
4141 }
4142 }
4143 /* else: Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */
4144
4145 switch (enmReflect)
4146 {
4147 case SVMREFLECTXCPT_XCPT:
4148 {
4149 /* If we are re-injecting the NMI, clear NMI blocking. */
4150 if (fReflectingNmi)
4151 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
4152
4153 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
4154 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, 0 /* GCPtrFaultAddress */);
4155
4156 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
4157 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
4158 !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
4159 break;
4160 }
4161
4162 case SVMREFLECTXCPT_DF:
4163 {
4164 hmR0SvmSetPendingXcptDF(pVCpu);
4165 rc = VINF_HM_DOUBLE_FAULT;
4166 break;
4167 }
4168
4169 case SVMREFLECTXCPT_TF:
4170 {
4171 rc = VINF_EM_RESET;
4172 break;
4173 }
4174
4175 default:
4176 Assert(rc == VINF_SUCCESS);
4177 break;
4178 }
4179 }
4180 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
4181 NOREF(pCtx);
4182 return rc;
4183}
4184
4185
4186/**
4187 * Advances the guest RIP using the next RIP saved by the CPU if the NRIP_SAVE
4188 * feature is supported, otherwise advances the RIP by @a cb bytes.
4189 *
4190 * @param pVCpu Pointer to the VMCPU.
4191 * @param pCtx Pointer to the guest-CPU context.
4192 * @param cb RIP increment value in bytes.
4193 *
4194 * @remarks Use this function only from #VMEXITs where the NRIP value is valid
4195 * when NRIP_SAVE is supported by the CPU!
4196 */
4197DECLINLINE(void) hmR0SvmUpdateRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
4198{
4199 if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4200 {
4201 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
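 /* With NRIP_SAVE the CPU records the next instruction's RIP in the VMCB on #VMEXIT; sanity check it against the expected instruction length. */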
4202 Assert(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);
4203 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4204 }
4205 else
4206 pCtx->rip += cb;
4207}
4208
4209
4210/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
4211/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
4212/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
4213
4214/** @name #VMEXIT handlers.
4215 * @{
4216 */
4217
4218/**
4219 * #VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
4220 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
4221 */
4222HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4223{
4224 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4225
4226 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
4227 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
4228 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
4229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
4230
4231 /*
4232 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to signal -before- the timer
4233 * fires whether the current interrupt is our own timer or some other host interrupt. We also cannot examine what
4234 * interrupt it is until the host actually takes the interrupt.
4235 *
4236 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an
4237 * AMD Phenom 9850 Quad-Core on Windows 64-bit host).
4238 */
4239 return VINF_EM_RAW_INTERRUPT;
4240}
4241
4242
4243/**
4244 * #VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional #VMEXIT.
4245 */
4246HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4247{
4248 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4249
4250 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4251 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
4252 int rc = VINF_SUCCESS;
4253 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4254 return rc;
4255}
4256
4257
4258/**
4259 * #VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional #VMEXIT.
4260 */
4261HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4262{
4263 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4264
4265 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4266 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
4267 int rc = VINF_SUCCESS;
4268 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4269 return rc;
4270}
4271
4272
4273/**
4274 * #VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional #VMEXIT.
4275 */
4276HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4277{
4278 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4279 PVM pVM = pVCpu->CTX_SUFF(pVM);
4280 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4281 if (RT_LIKELY(rc == VINF_SUCCESS))
4282 {
4283 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4284 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4285 }
4286 else
4287 {
4288 AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
4289 rc = VERR_EM_INTERPRETER;
4290 }
4291 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
4292 return rc;
4293}
4294
4295
4296/**
4297 * #VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional #VMEXIT.
4298 */
4299HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4300{
4301 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4302 PVM pVM = pVCpu->CTX_SUFF(pVM);
4303 int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4304 if (RT_LIKELY(rc == VINF_SUCCESS))
4305 {
4306 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4307 pSvmTransient->fUpdateTscOffsetting = true;
4308
4309 /* Single step check. */
4310 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4311 }
4312 else
4313 {
4314 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
4315 rc = VERR_EM_INTERPRETER;
4316 }
4317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
4318 return rc;
4319}
4320
4321
4322/**
4323 * #VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional #VMEXIT.
4324 */
4325HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4326{
4327 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4328 int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
4329 if (RT_LIKELY(rc == VINF_SUCCESS))
4330 {
4331 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4332 pSvmTransient->fUpdateTscOffsetting = true;
4333 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4334 }
4335 else
4336 {
4337 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtscp failed with %Rrc\n", rc));
4338 rc = VERR_EM_INTERPRETER;
4339 }
4340 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
4341 return rc;
4342}
4343
4344
4345/**
4346 * #VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional #VMEXIT.
4347 */
4348HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4349{
4350 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4351 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4352 if (RT_LIKELY(rc == VINF_SUCCESS))
4353 {
4354 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4355 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4356 }
4357 else
4358 {
4359 AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
4360 rc = VERR_EM_INTERPRETER;
4361 }
4362 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
4363 return rc;
4364}
4365
4366
4367/**
4368 * #VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional #VMEXIT.
4369 */
4370HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4371{
4372 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4373 PVM pVM = pVCpu->CTX_SUFF(pVM);
4374 Assert(!pVM->hm.s.fNestedPaging);
4375
4376 /** @todo Decode Assist. */
4377 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx); /* Updates RIP if successful. */
4378 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
4379 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
4380 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4381 return rc;
4382}
4383
4384
4385/**
4386 * #VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional #VMEXIT.
4387 */
4388HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4389{
4390 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4391
4392 hmR0SvmUpdateRip(pVCpu, pCtx, 1);
4393 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT;
4394 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4395 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
4396 if (rc != VINF_SUCCESS)
4397 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
4398 return rc;
4399}
4400
4401
4402/**
4403 * #VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional #VMEXIT.
4404 */
4405HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4406{
4407 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4408 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4409 if (RT_LIKELY(rc == VINF_SUCCESS))
4410 {
4411 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4412 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4413 }
4414 else
4415 {
4416 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
4417 rc = VERR_EM_INTERPRETER;
4418 }
4419 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
4420 return rc;
4421}
4422
4423
4424/**
4425 * #VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional #VMEXIT.
4426 */
4427HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4428{
4429 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4430 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4431 int rc = VBOXSTRICTRC_VAL(rc2);
4432 if ( rc == VINF_EM_HALT
4433 || rc == VINF_SUCCESS)
4434 {
4435 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4436
4437 if ( rc == VINF_EM_HALT
4438 && EMMonitorWaitShouldContinue(pVCpu, pCtx))
4439 {
4440 rc = VINF_SUCCESS;
4441 }
4442 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4443 }
4444 else
4445 {
4446 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
4447 rc = VERR_EM_INTERPRETER;
4448 }
4449 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
4450 ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
4451 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
4452 return rc;
4453}
4454
4455
4456/**
4457 * #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN).
4458 * Conditional #VMEXIT.
4459 */
4460HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4461{
4462 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4463 return VINF_EM_RESET;
4464}
4465
4466
4467/**
4468 * #VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional #VMEXIT.
4469 */
4470HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4471{
4472 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4473
4474 Log4(("hmR0SvmExitReadCRx: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
4475
4476 /** @todo Decode Assist. */
4477 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4478 int rc = VBOXSTRICTRC_VAL(rc2);
4479 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3,
4480 ("hmR0SvmExitReadCRx: EMInterpretInstruction failed rc=%Rrc\n", rc));
4481 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
4482 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);
4483 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4484 return rc;
4485}
4486
4487
4488/**
4489 * #VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional #VMEXIT.
4490 */
4491HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4492{
4493 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4494
4495 /** @todo Decode Assist. */
4496 VBOXSTRICTRC rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
4497 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
4498 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
4499 rcStrict = VERR_EM_INTERPRETER;
4500 if (rcStrict == VINF_SUCCESS)
4501 {
4502 /* RIP has been updated by IEMExecOneBypassEx(). */
4503 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0) <= 15);
4504 switch (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)
4505 {
4506 case 0: /* CR0. */
4507 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
4508 break;
4509
4510 case 3: /* CR3. */
4511 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
4512 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
4513 break;
4514
4515 case 4: /* CR4. */
4516 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
4517 break;
4518
4519 case 8: /* CR8 (TPR). */
4520 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4521 break;
4522
4523 default:
4524 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
4525 pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0));
4526 break;
4527 }
4528 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
4529 }
4530 else
4531 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_CHANGE_MODE || rcStrict == VINF_PGM_SYNC_CR3);
4532 return VBOXSTRICTRC_TODO(rcStrict);
4533}
4534
4535
4536/**
4537 * #VMEXIT handler for instructions that result in a #UD exception delivered to
4538 * the guest.
4539 */
4540HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4541{
4542 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4543 hmR0SvmSetPendingXcptUD(pVCpu);
4544 return VINF_SUCCESS;
4545}
4546
4547
4548/**
4549 * #VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional #VMEXIT.
4550 */
4551HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4552{
4553 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4554 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4555 PVM pVM = pVCpu->CTX_SUFF(pVM);
4556
4557 int rc;
4558 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4559 {
4560 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
4561
4562 /* Handle TPR patching; intercepted LSTAR write. */
4563 if ( pVM->hm.s.fTPRPatchingActive
4564 && pCtx->ecx == MSR_K8_LSTAR)
4565 {
4566 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
4567 {
4568 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
4569 int rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
4570 AssertRC(rc2);
4571 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4572 }
4573 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4574 rc = VINF_SUCCESS;
4575 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4576 return rc;
4577 }
4578
4579 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4580 {
4581 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4582 if (RT_LIKELY(rc == VINF_SUCCESS))
4583 {
4584 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4585 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4586 }
4587 else
4588 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
4589 }
4590 else
4591 {
4592 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
4593 if (RT_LIKELY(rc == VINF_SUCCESS))
4594 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); /* RIP updated by EMInterpretInstruction(). */
4595 else
4596 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4597 }
4598
4599 if (rc == VINF_SUCCESS)
4600 {
4601 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
4602 if ( pCtx->ecx >= MSR_IA32_X2APIC_START
4603 && pCtx->ecx <= MSR_IA32_X2APIC_END)
4604 {
4605 /*
4606 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). When full APIC register
4607 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before
4608 * EMInterpretWrmsr() changes it.
4609 */
4610 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4611 }
4612 else if (pCtx->ecx == MSR_K6_EFER)
4613 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4614 else if (pCtx->ecx == MSR_IA32_TSC)
4615 pSvmTransient->fUpdateTscOffsetting = true;
4616 }
4617 }
4618 else
4619 {
4620 /* MSR Read access. */
4621 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
4622 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
4623
4624 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4625 {
4626 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4627 if (RT_LIKELY(rc == VINF_SUCCESS))
4628 {
4629 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4630 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4631 }
4632 else
4633 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
4634 }
4635 else
4636 {
4637 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
4638 if (RT_UNLIKELY(rc != VINF_SUCCESS))
4639 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: RdMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4640 /* RIP updated by EMInterpretInstruction(). */
4641 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4642 }
4643 }
4644
4645 /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
4646 return rc;
4647}
4648
4649
4650/**
4651 * #VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional #VMEXIT.
4652 */
4653HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4654{
4655 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4656 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
4657
4658 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
4659 if (pSvmTransient->fWasGuestDebugStateActive)
4660 {
4661 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
4662 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
4663 return VERR_SVM_UNEXPECTED_EXIT;
4664 }
4665
4666 /*
4667 * Lazy DR0-3 loading.
4668 */
4669 if (!pSvmTransient->fWasHyperDebugStateActive)
4670 {
4671 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
4672 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
4673
4674 /* Don't intercept DRx read and writes. */
4675 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4676 pVmcb->ctrl.u16InterceptRdDRx = 0;
4677 pVmcb->ctrl.u16InterceptWrDRx = 0;
4678 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
4679
4680 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
4681 VMMRZCallRing3Disable(pVCpu);
4682 HM_DISABLE_PREEMPT();
4683
4684 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
4685 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
4686 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
4687
4688 HM_RESTORE_PREEMPT();
4689 VMMRZCallRing3Enable(pVCpu);
4690
4691 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
4692 return VINF_SUCCESS;
4693 }
4694
4695 /*
4696 * Interpret the read/writing of DRx.
4697 */
4698 /** @todo Decode assist. */
4699 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4700 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
4701 if (RT_LIKELY(rc == VINF_SUCCESS))
4702 {
4703 /* Not necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assist. */
4704 /** @todo CPUM should set this flag! */
4705 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
4706 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4707 }
4708 else
4709 Assert(rc == VERR_EM_INTERPRETER);
4710 return VBOXSTRICTRC_TODO(rc);
4711}
4712
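/*
 * Rough sketch of the lazy DRx strategy above: while the guest leaves DR0-DR3/DR7 alone the
 * host debug registers stay loaded and every DRx access is intercepted. The first intercepted
 * MOV DRx drops the read/write intercepts, loads the guest debug state and simply restarts the
 * instruction, so subsequent accesses run without exiting:
 *
 *     first MOV DRx #VMEXIT  ->  u16InterceptRdDRx = u16InterceptWrDRx = 0
 *                            ->  CPUMR0LoadGuestDebugState()
 *                            ->  VINF_SUCCESS (re-executes the MOV DRx)
 *
 * When the hypervisor's own debug state is active the access is interpreted instead (see the
 * EMInterpretInstruction fall-through above).
 */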
4713
4714/**
4715 * #VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional #VMEXIT.
4716 */
4717HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4718{
4719 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4720 /* For now it's the same since we interpret the instruction anyway. Will change once Decode Assist is used. */
4721 int rc = hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
4722 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
4723 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
4724 return rc;
4725}
4726
4727
4728/**
4729 * #VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional #VMEXIT.
4730 */
4731HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4732{
4733 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4734
4735 /** @todo decode assists... */
4736 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
4737 if (rcStrict == VINF_IEM_RAISED_XCPT)
4738 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
4739
4740 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
4741 Log4(("hmR0SvmExitXsetbv: New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n",
4742 pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, pCtx->cr4, VBOXSTRICTRC_VAL(rcStrict)));
4743
4744 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
4745 return VBOXSTRICTRC_TODO(rcStrict);
4746}
4747
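/*
 * Note on fLoadSaveGuestXcr0 above: after a guest XSETBV we only need to swap XCR0 across
 * world switches when the guest actually uses XSAVE (CR4.OSXSAVE set) and its XCR0 differs
 * from the value currently loaded on the host CPU, i.e. conceptually:
 *
 *     fLoadSaveGuestXcr0 = guest_CR4.OSXSAVE && (guest_XCR0 != ASMGetXcr0());
 *
 * When the values match there is nothing to switch and the extra XSETBV/XGETBV work on each
 * VM-entry/exit is skipped.
 */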
4748
4749/**
4750 * #VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional #VMEXIT.
4751 */
4752HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4753{
4754 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4755
4756 /* I/O operation lookup arrays. */
4757 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
4758 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
4759 the result (in AL/AX/EAX). */
4760 Log4(("hmR0SvmExitIOInstr: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
4761
4762 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4763 PVM pVM = pVCpu->CTX_SUFF(pVM);
4764
4765 /* Refer to the AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
4766 SVMIOIOEXIT IoExitInfo;
4767 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
4768 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
4769 uint32_t cbValue = s_aIOSize[uIOWidth];
4770 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
4771
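 /*
 * Quick reference for the decode above (see the spec figure cited a few lines up): bits 6:4
 * of EXITINFO1 are one-hot operand-size flags (SZ8/SZ16/SZ32), so the 3-bit field extracted
 * with ((u >> 4) & 7) can only be 1, 2 or 4 for a well-formed exit; those are exactly the
 * indexes the s_aIOSize/s_aIOOpAnd tables populate, everything else is rejected below.
 * Example, a 16-bit "IN AX, DX":
 *
 *     uIOWidth = 2  ->  cbValue = 2, uAndVal = 0xffff  (merge the result into AX only)
 */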
4772 if (RT_UNLIKELY(!cbValue))
4773 {
4774 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
4775 return VERR_EM_INTERPRETER;
4776 }
4777
4778 VBOXSTRICTRC rcStrict;
4779 if (IoExitInfo.n.u1STR)
4780 {
4781 /* INS/OUTS - I/O String instruction. */
4782 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
4783
4784 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
4785 * in EXITINFO1? Investigate once this thing is up and running. */
4786
4787 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
4788 if (rcStrict == VINF_SUCCESS)
4789 {
4790 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4791 {
4792 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4793 (DISCPUMODE)pDis->uAddrMode, cbValue);
4794 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
4795 }
4796 else
4797 {
4798 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4799 (DISCPUMODE)pDis->uAddrMode, cbValue);
4800 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
4801 }
4802 }
4803 else
4804 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
4805 }
4806 else
4807 {
4808 /* IN/OUT - I/O instruction. */
4809 Assert(!IoExitInfo.n.u1REP);
4810
4811 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4812 {
4813 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
4814 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4815 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4816
4817 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
4818 }
4819 else
4820 {
4821 uint32_t u32Val = 0;
4822
4823 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
4824 if (IOM_SUCCESS(rcStrict))
4825 {
4826 /* Save result of I/O IN instr. in AL/AX/EAX. */
4827 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
4828 }
4829 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4830 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4831
4832 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
4833 }
4834 }
4835
4836 if (IOM_SUCCESS(rcStrict))
4837 {
4838 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
4839 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
4840
4841 /*
4842 * If any I/O breakpoints are armed, we need to check if one triggered
4843 * and take appropriate action.
4844 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
4845 */
4846 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
4847 * execution engines about whether hyper BPs and such are pending. */
4848 uint32_t const uDr7 = pCtx->dr[7];
4849 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
4850 && X86_DR7_ANY_RW_IO(uDr7)
4851 && (pCtx->cr4 & X86_CR4_DE))
4852 || DBGFBpIsHwIoArmed(pVM)))
4853 {
4854 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
4855 VMMRZCallRing3Disable(pVCpu);
4856 HM_DISABLE_PREEMPT();
4857
4858 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
4859 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
4860
4861 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
4862 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
4863 {
4864 /* Raise #DB. */
4865 pVmcb->guest.u64DR6 = pCtx->dr[6];
4866 pVmcb->guest.u64DR7 = pCtx->dr[7];
4867 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
4868 hmR0SvmSetPendingXcptDB(pVCpu);
4869 }
4870 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
4871 else if ( rcStrict2 != VINF_SUCCESS
4872 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
4873 rcStrict = rcStrict2;
4874
4875 HM_RESTORE_PREEMPT();
4876 VMMRZCallRing3Enable(pVCpu);
4877 }
4878
4879 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
4880 }
4881
4882#ifdef VBOX_STRICT
4883 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4884 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
4885 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4886 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
4887 else
4888 {
4889 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
4890 * statuses, that the VMM device and some others may return. See
4891 * IOM_SUCCESS() for guidance. */
4892 AssertMsg( RT_FAILURE(rcStrict)
4893 || rcStrict == VINF_SUCCESS
4894 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
4895 || rcStrict == VINF_EM_DBG_BREAKPOINT
4896 || rcStrict == VINF_EM_RAW_GUEST_TRAP
4897 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4898 }
4899#endif
4900 return VBOXSTRICTRC_TODO(rcStrict);
4901}
4902
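/*
 * Note on the VINF_IOM_R3_IOPORT_READ/WRITE paths in the handler above: when IOM cannot
 * complete a port access in ring-0 it asks to continue in ring-3, and, as the names suggest,
 * HMR0SavePendingIOPortRead/Write stash the port, size, AND-mask and the next-instruction RIP
 * (EXITINFO2) so the access can be finished and RIP advanced once execution resumes in ring-3.
 */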
4903
4904/**
4905 * #VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional
4906 * #VMEXIT.
4907 */
4908HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4909{
4910 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4911 PVM pVM = pVCpu->CTX_SUFF(pVM);
4912 Assert(pVM->hm.s.fNestedPaging);
4913
4914 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4915
4916 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
4917 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4918 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
4919 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
4920
4921 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
4922
4923#ifdef VBOX_HM_WITH_GUEST_PATCHING
4924 /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
4925 if ( pVM->hm.s.fTprPatchingAllowed
4926 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == 0x80 /* TPR offset. */
4927 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
4928 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
4929 && !CPUMIsGuestInLongModeEx(pCtx)
4930 && !CPUMGetGuestCPL(pVCpu)
4931 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
4932 {
4933 RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
4934 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4935
4936 if (GCPhysFaultAddr == GCPhysApicBase + 0x80)
4937 {
4938 /* Only attempt to patch the instruction once. */
4939 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
4940 if (!pPatch)
4941 return VINF_EM_HM_PATCH_TPR_INSTR;
4942 }
4943 }
4944#endif
4945
4946 /*
4947 * Determine the nested paging mode.
4948 */
4949 PGMMODE enmNestedPagingMode;
4950#if HC_ARCH_BITS == 32
4951 if (CPUMIsGuestInLongModeEx(pCtx))
4952 enmNestedPagingMode = PGMMODE_AMD64_NX;
4953 else
4954#endif
4955 enmNestedPagingMode = PGMGetHostMode(pVM);
4956
4957 /*
4958 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
4959 */
4960 int rc;
4961 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
4962 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
4963 {
4964 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
4965 u32ErrCode);
4966 rc = VBOXSTRICTRC_VAL(rc2);
4967
4968 /*
4969 * If we succeed, resume guest execution.
4970 * If we fail to interpret the instruction because we couldn't get the guest physical address
4971 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
4972 * in the host TLB), resume execution anyway; this causes a guest page fault which lets the guest handle this
4973 * weird case. See @bugref{6043}.
4974 */
4975 if ( rc == VINF_SUCCESS
4976 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4977 || rc == VERR_PAGE_NOT_PRESENT)
4978 {
4979 /* Successfully handled MMIO operation. */
4980 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4981 rc = VINF_SUCCESS;
4982 }
4983 return rc;
4984 }
4985
4986 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
4987 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
4988 TRPMResetTrap(pVCpu);
4989
4990 Log4(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
4991
4992 /*
4993 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
4994 */
4995 if ( rc == VINF_SUCCESS
4996 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4997 || rc == VERR_PAGE_NOT_PRESENT)
4998 {
4999 /* We've successfully synced our shadow page tables. */
5000 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
5001 rc = VINF_SUCCESS;
5002 }
5003
5004 return rc;
5005}
5006
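/*
 * Informal summary of the #NPF handling above: EXITINFO1 carries #PF-style error-code bits
 * and the P/RSVD combination decides the path taken:
 *
 *     P=1, RSVD=1  ->  page marked with the reserved-bit MMIO trick; let
 *                      PGMR0Trap0eHandlerNPMisconfig() emulate the access.
 *     otherwise    ->  genuine nested page fault; PGMR0Trap0eHandlerNestedPaging()
 *                      syncs the nested paging structures.
 *
 * (P=0 together with RSVD=1 is impossible and asserted against, since a not-present entry
 * cannot trigger a reserved-bit violation.)
 */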
5007
5008/**
5009 * #VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional #VMEXIT.
5010 */
5011HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5012{
5013 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5014
5015 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5016 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 0; /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
5017 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
5018
5019 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs; it is now ready. */
5020 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
5021 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
5022
5023 /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
5024 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
5025 return VINF_SUCCESS;
5026}
5027
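/*
 * Background for the VINTR exit above: when an interrupt or NMI is pending but the guest
 * cannot accept it yet, a dummy virtual interrupt (V_IRQ) plus the VINTR intercept is used
 * as an interrupt-window notification: the CPU exits the moment the guest becomes able to
 * take interrupts. Here both the V_IRQ and the intercept are cleared again and the real
 * event is injected by hmR0SvmEvaluatePendingEvent() on the way back into the guest.
 */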
5028
5029/**
5030 * #VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional #VMEXIT.
5031 */
5032HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5033{
5034 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5035
5036#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
5037 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
5038#endif
5039
5040 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
5041 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5042 if ( !(pVmcb->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
5043 && pVCpu->hm.s.Event.fPending) /** @todo fPending cannot be 'true', see hmR0SvmInjectPendingEvent(). See @bugref{7362}.*/
5044 {
5045 /*
5046 * AMD-V does not provide us with the original exception but we have it in u64IntInfo since we
5047 * injected the event during VM-entry.
5048 */
5049 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery.\n"));
5050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
5051 return VINF_EM_RAW_INJECT_TRPM_EVENT;
5052 }
5053
5054 /** @todo Emulate task switch someday, currently just going back to ring-3 for
5055 * emulation. */
5056 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
5057 return VERR_EM_INTERPRETER;
5058}
5059
5060
5061/**
5062 * #VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional #VMEXIT.
5063 */
5064HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5065{
5066 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5067 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
5068
5069 /* First check if this is a patched VMMCALL for mov TPR */
5070 int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
5071 if (rc == VINF_SUCCESS)
5072 {
5073 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
5074 return VINF_SUCCESS;
5075 }
5076 else if (rc == VERR_NOT_FOUND)
5077 {
5078 if (pVCpu->hm.s.fHypercallsEnabled)
5079 {
5080 rc = GIMHypercall(pVCpu, pCtx);
5081 if (RT_SUCCESS(rc))
5082 {
5083 /* If the hypercall changes anything other than guest general-purpose registers,
5084 we would need to reload the guest changed bits here before VM-reentry. */
5085 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
5086 return VINF_SUCCESS;
5087 }
5088 }
5089 }
5090
5091 hmR0SvmSetPendingXcptUD(pVCpu);
5092 return VINF_SUCCESS;
5093}
5094
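/*
 * Note on the VMMCALL exit above: the instruction is either one of our own TPR-access patches
 * (recognized and emulated by hmR0SvmEmulateMovTpr()) or a guest hypercall dispatched to GIM.
 * After a successful hypercall RIP is advanced by 3 since VMMCALL encodes as the three-byte
 * opcode 0F 01 D9. Anything not recognized gets a #UD injected back into the guest.
 */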
5095
5096/**
5097 * #VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional #VMEXIT.
5098 */
5099HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5100{
5101 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5102
5103 /* Clear NMI blocking. */
5104 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5105
5106 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs; it is now ready. */
5107 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5108 hmR0SvmClearIretIntercept(pVmcb);
5109
5110 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
5111 return VINF_SUCCESS;
5112}
5113
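/*
 * Note on the IRET exit above: once an NMI has been delivered, further NMIs are blocked until
 * the handler executes IRET. While an NMI is pending during that window the IRET instruction
 * is intercepted; this exit therefore marks the end of the blocking period (the IRET itself
 * has not executed yet), so the blocking force-flag is cleared, the intercept removed, and the
 * pending NMI gets injected on the next VM-entry.
 */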
5114
5115/**
5116 * #VMEXIT handler for page-fault exceptions (SVM_EXIT_EXCEPTION_E). Conditional
5117 * #VMEXIT.
5118 */
5119HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5120{
5121 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5122
5123 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5124
5125 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
5126 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5127 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
5128 RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
5129 PVM pVM = pVCpu->CTX_SUFF(pVM);
5130
5131#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
5132 if (pVM->hm.s.fNestedPaging)
5133 {
5134 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
5135 if (!pSvmTransient->fVectoringDoublePF)
5136 {
5137 /* A genuine guest #PF, reflect it to the guest. */
5138 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
5139 Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
5140 uFaultAddress, u32ErrCode));
5141 }
5142 else
5143 {
5144 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5145 hmR0SvmSetPendingXcptDF(pVCpu);
5146 Log4(("Pending #DF due to vectoring #PF. NP\n"));
5147 }
5148 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
5149 return VINF_SUCCESS;
5150 }
5151#endif
5152
5153 Assert(!pVM->hm.s.fNestedPaging);
5154
5155#ifdef VBOX_HM_WITH_GUEST_PATCHING
5156 /* Shortcut for APIC TPR reads and writes; only applicable to 32-bit guests. */
5157 if ( pVM->hm.s.fTprPatchingAllowed
5158 && (uFaultAddress & 0xfff) == 0x80 /* TPR offset. */
5159 && !(u32ErrCode & X86_TRAP_PF_P) /* Not present. */
5160 && !CPUMIsGuestInLongModeEx(pCtx)
5161 && !CPUMGetGuestCPL(pVCpu)
5162 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
5163 {
5164 RTGCPHYS GCPhysApicBase;
5165 GCPhysApicBase = pCtx->msrApicBase;
5166 GCPhysApicBase &= PAGE_BASE_GC_MASK;
5167
5168 /* Check if the page at the fault-address is the APIC base. */
5169 RTGCPHYS GCPhysPage;
5170 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
5171 if ( rc2 == VINF_SUCCESS
5172 && GCPhysPage == GCPhysApicBase)
5173 {
5174 /* Only attempt to patch the instruction once. */
5175 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
5176 if (!pPatch)
5177 return VINF_EM_HM_PATCH_TPR_INSTR;
5178 }
5179 }
5180#endif
5181
5182 Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
5183 pCtx->rip, u32ErrCode, pCtx->cr3));
5184
5185 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
5186 of differentiating between instruction emulation and event injection that caused the #PF. See @bugref{6607}. */
5187 if (pSvmTransient->fVectoringPF)
5188 {
5189 Assert(pVCpu->hm.s.Event.fPending);
5190 return VINF_EM_RAW_INJECT_TRPM_EVENT;
5191 }
5192
5193 TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
5194 int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
5195
5196 Log4(("#PF rc=%Rrc\n", rc));
5197
5198 if (rc == VINF_SUCCESS)
5199 {
5200 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
5201 TRPMResetTrap(pVCpu);
5202 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
5203 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
5204 return rc;
5205 }
5206 else if (rc == VINF_EM_RAW_GUEST_TRAP)
5207 {
5208 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
5209
5210 if (!pSvmTransient->fVectoringDoublePF)
5211 {
5212 /* It's a guest page fault and needs to be reflected to the guest. */
5213 u32ErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
5214 TRPMResetTrap(pVCpu);
5215 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
5216 }
5217 else
5218 {
5219 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5220 TRPMResetTrap(pVCpu);
5221 hmR0SvmSetPendingXcptDF(pVCpu);
5222 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
5223 }
5224
5225 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
5226 return VINF_SUCCESS;
5227 }
5228
5229 TRPMResetTrap(pVCpu);
5230 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
5231 return rc;
5232}
5233
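/*
 * Summary of the possible outcomes of the #PF handler above (shadow paging; with nested paging
 * this intercept is only armed by the debug-time trap-all build options):
 *   - VINF_SUCCESS: PGM synced the shadow page tables or handled an MMIO access, resume.
 *   - VINF_EM_RAW_GUEST_TRAP: a genuine guest fault, reflected back as #PF, or promoted to #DF
 *     when it happened while delivering another #PF.
 *   - anything else: PGM needs ring-3 assistance, return the status to the exit loop.
 */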
5234
5235/**
5236 * #VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
5237 * Conditional #VMEXIT.
5238 */
5239HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5240{
5241 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5242
5243 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5244
5245 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
5246 VMMRZCallRing3Disable(pVCpu);
5247 HM_DISABLE_PREEMPT();
5248
5249 int rc;
5250 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
5251 if (pSvmTransient->fWasGuestFPUStateActive)
5252 {
5253 rc = VINF_EM_RAW_GUEST_TRAP;
5254 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
5255 }
5256 else
5257 {
5258#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
5259 Assert(!pSvmTransient->fWasGuestFPUStateActive);
5260#endif
5261 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
5262 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
5263 }
5264
5265 HM_RESTORE_PREEMPT();
5266 VMMRZCallRing3Enable(pVCpu);
5267
5268 if (rc == VINF_SUCCESS)
5269 {
5270 /* Guest FPU state was activated; we'll want to change CR0 FPU intercepts before the next VM-reentry. */
5271 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
5272 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
5273 pVCpu->hm.s.fPreloadGuestFpu = true;
5274 }
5275 else
5276 {
5277 /* Forward #NM to the guest. */
5278 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
5279 hmR0SvmSetPendingXcptNM(pVCpu);
5280 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
5281 }
5282 return VINF_SUCCESS;
5283}
5284
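/*
 * Background for the #NM handling above: guest FPU/SSE state is loaded lazily. CR0.TS is kept
 * set (and #NM intercepted) until the guest actually touches the FPU; on that first #NM,
 * CPUMR0Trap07Handler() loads the guest FPU state and the CR0 intercepts are reconfigured
 * (HM_CHANGED_GUEST_CR0) so later FPU use runs without exiting. If the guest's FPU state was
 * already active, the #NM is the guest's own doing and is reflected straight back.
 */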
5285
5286/**
5287 * #VMEXIT handler for undefined opcode (SVM_EXIT_EXCEPTION_6).
5288 * Conditional #VMEXIT.
5289 */
5290HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5291{
5292 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5293
5294 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5295
5296 if (pVCpu->hm.s.fGIMTrapXcptUD)
5297 GIMXcptUD(pVCpu, pCtx, NULL /* pDis */);
5298 else
5299 hmR0SvmSetPendingXcptUD(pVCpu);
5300
5301 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
5302 return VINF_SUCCESS;
5303}
5304
5305
5306/**
5307 * #VMEXIT handler for math-fault exceptions (SVM_EXIT_EXCEPTION_10).
5308 * Conditional #VMEXIT.
5309 */
5310HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5311{
5312 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5313
5314 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5315
5316 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
5317
5318 if (!(pCtx->cr0 & X86_CR0_NE))
5319 {
5320 PVM pVM = pVCpu->CTX_SUFF(pVM);
5321 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
5322 unsigned cbOp;
5323 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
5324 if (RT_SUCCESS(rc))
5325 {
5326 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
5327 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
5328 if (RT_SUCCESS(rc))
5329 pCtx->rip += cbOp;
5330 }
5331 else
5332 Log4(("hmR0SvmExitXcptMF: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
5333 return rc;
5334 }
5335
5336 hmR0SvmSetPendingXcptMF(pVCpu);
5337 return VINF_SUCCESS;
5338}
5339
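/*
 * Note on the #MF handling above: with CR0.NE clear the guest expects legacy external FPU
 * error reporting, i.e. FERR# routed through the interrupt controller as IRQ 13 rather than
 * a #MF exception. So the instruction at CS:RIP is disassembled to get its length, RIP is
 * advanced past it and PDMIsaSetIrq() raises IRQ 13. With CR0.NE set the #MF is simply
 * reflected back to the guest.
 */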
5340
5341/**
5342 * #VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1). Conditional
5343 * #VMEXIT.
5344 */
5345HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5346{
5347 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5348
5349 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5350
5351 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
5352
5353 /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, in both cases
5354 DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
5355 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5356 PVM pVM = pVCpu->CTX_SUFF(pVM);
5357 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
5358 if (rc == VINF_EM_RAW_GUEST_TRAP)
5359 {
5360 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
5361 if (CPUMIsHyperDebugStateActive(pVCpu))
5362 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
5363
5364 /* Reflect the exception back to the guest. */
5365 hmR0SvmSetPendingXcptDB(pVCpu);
5366 rc = VINF_SUCCESS;
5367 }
5368
5369 /*
5370 * Update DR6.
5371 */
5372 if (CPUMIsHyperDebugStateActive(pVCpu))
5373 {
5374 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
5375 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
5376 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
5377 }
5378 else
5379 {
5380 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
5381 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
5382 }
5383
5384 return rc;
5385}
5386
5387/** @} */
5388