VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@52654

Last change on this file since 52654 was 52654, checked in by vboxsync, 10 years ago

VMM/HMSVMR0: Use AMD-V terminology.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 199.6 KB
1/* $Id: HMSVMR0.cpp 52654 2014-09-09 13:51:43Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24
25#include "HMInternal.h"
26#include <VBox/vmm/vm.h>
27#include "HMSVMR0.h"
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/gim.h>
33
34#ifdef DEBUG_ramshankar
35# define HMSVM_SYNC_FULL_GUEST_STATE
36# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
37# define HMSVM_ALWAYS_TRAP_PF
38# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45#ifdef VBOX_WITH_STATISTICS
46# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
47 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
48 if ((u64ExitCode) == SVM_EXIT_NPF) \
49 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
50 else \
51 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
52 } while (0)
53#else
54# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
55#endif
56
57/** If we decide to use a function table approach, this can be useful to
58 * switch to a "static DECLCALLBACK(int)". */
59#define HMSVM_EXIT_DECL static int
60
61/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
62 *
63 * The CPU format of the segment attribute is described in X86DESCATTRBITS
64 * which is 16 bits wide (i.e. it includes 4 bits of the segment limit).
65 *
66 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
67 * only the attribute bits and nothing else); the upper 4 bits are unused.
68 *
69 * @{ */
70#define HMSVM_CPU_2_VMCB_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
71#define HMSVM_VMCB_2_CPU_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
72/** @} */
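/* For example, a flat 32-bit code segment with CPU attributes 0xCF9B (type=0xB,
   S=1, DPL=0, P=1, limit 19:16=0xF, D=1, G=1) becomes 0x0C9B in the VMCB format:
   the low byte is kept and the AVL/L/D/G nibble moves from bits 15:12 down to
   bits 11:8, dropping the limit bits (the limit is stored separately in the VMCB).
   Converting back yields 0xC09B, i.e. only the redundant limit nibble is lost. */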
73
74/** @name Macros for loading, storing segment registers to/from the VMCB.
75 * @{ */
76#define HMSVM_LOAD_SEG_REG(REG, reg) \
77 do \
78 { \
79 Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
80 Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
81 pVmcb->guest.REG.u16Sel = pCtx->reg.Sel; \
82 pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
83 pVmcb->guest.REG.u64Base = pCtx->reg.u64Base; \
84 pVmcb->guest.REG.u16Attr = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
85 } while (0)
86
87#define HMSVM_SAVE_SEG_REG(REG, reg) \
88 do \
89 { \
90 pMixedCtx->reg.Sel = pVmcb->guest.REG.u16Sel; \
91 pMixedCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
92 pMixedCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \
93 pMixedCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
94 pMixedCtx->reg.u64Base = pVmcb->guest.REG.u64Base; \
95 pMixedCtx->reg.Attr.u = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
96 } while (0)
97/** @} */
98
99/** Macro for checking and returning from the calling function for
100 * \#VMEXIT intercepts that may be caused during delivery of another
101 * event in the guest. */
102#define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
103 do \
104 { \
105 int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
106 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT)) \
107 return VINF_SUCCESS; \
108 else if (RT_UNLIKELY(rc == VINF_EM_RESET)) \
109 return rc; \
110 } while (0)
111
112/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
113 * instruction that exited. */
114#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
115 do { \
116 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
117 (a_rc) = VINF_EM_DBG_STEPPED; \
118 } while (0)
119
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121#define HMSVM_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126#define HMSVM_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
127 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
130
131/** Exception bitmap mask for all contributory exceptions.
132 *
133 * Page fault is deliberately excluded here as whether it is contributory or
134 * benign depends on the circumstances; page faults are handled separately.
135 */
136#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
137 | RT_BIT(X86_XCPT_DE))
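/* E.g. a #GP raised while delivering another #GP (both contributory) must be
   converted into a #DF, whereas a #PF raised during event delivery is tracked
   via the fVectoringDoublePF/fVectoringPF fields of SVMTRANSIENT below. */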
138
139/** @name VMCB Clean Bits.
140 *
141 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
142 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
143 * memory.
144 *
145 * @{ */
146/** All intercept vectors, TSC offset, PAUSE filter counter. */
147#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
148/** I/O permission bitmap, MSR permission bitmap. */
149#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
150/** ASID. */
151#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
152/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
153 *  V_INTR_VECTOR. */
154#define HMSVM_VMCB_CLEAN_TPR RT_BIT(3)
155/** Nested Paging: Nested CR3 (nCR3), PAT. */
156#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
157/** Control registers (CR0, CR3, CR4, EFER). */
158#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
159/** Debug registers (DR6, DR7). */
160#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
161/** GDT, IDT limit and base. */
162#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
163/** Segment register: CS, SS, DS, ES limit and base. */
164#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
165/** CR2.*/
166#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
167/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
168#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
169/** AVIC (AVIC APIC_BAR, AVIC APIC_BACKING_PAGE, AVIC
170 *  PHYSICAL_TABLE and AVIC LOGICAL_TABLE pointers). */
171#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
172/** Mask of all valid VMCB Clean bits. */
173#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
174 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
175 | HMSVM_VMCB_CLEAN_ASID \
176 | HMSVM_VMCB_CLEAN_TPR \
177 | HMSVM_VMCB_CLEAN_NP \
178 | HMSVM_VMCB_CLEAN_CRX_EFER \
179 | HMSVM_VMCB_CLEAN_DRX \
180 | HMSVM_VMCB_CLEAN_DT \
181 | HMSVM_VMCB_CLEAN_SEG \
182 | HMSVM_VMCB_CLEAN_CR2 \
183 | HMSVM_VMCB_CLEAN_LBR \
184 | HMSVM_VMCB_CLEAN_AVIC)
185/** @} */
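/* Usage pattern: whenever guest state that is mirrored in the VMCB is modified by
   the VMM, the corresponding clean bit must be cleared so that AMD-V reloads that
   state from memory on the next VMRUN, e.g.:
       pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
   after changing any of the intercept vectors (see hmR0SvmAddXcptIntercept). */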
186
187/** @name SVM transient.
188 *
189 * A state structure for holding miscellaneous information across an AMD-V
190 * VMRUN/#VMEXIT operation, restored after the transition.
191 *
192 * @{ */
193typedef struct SVMTRANSIENT
194{
195 /** The host's rflags/eflags. */
196 RTCCUINTREG uEflags;
197#if HC_ARCH_BITS == 32
198 uint32_t u32Alignment0;
199#endif
200
201 /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
202 uint64_t u64ExitCode;
203 /** The guest's TPR value used for TPR shadowing. */
204 uint8_t u8GuestTpr;
205 /** Alignment. */
206 uint8_t abAlignment0[7];
207
208 /** Whether the guest FPU state was active at the time of #VMEXIT. */
209 bool fWasGuestFPUStateActive;
210 /** Whether the guest debug state was active at the time of #VMEXIT. */
211 bool fWasGuestDebugStateActive;
212 /** Whether the hyper debug state was active at the time of #VMEXIT. */
213 bool fWasHyperDebugStateActive;
214 /** Whether the TSC offset mode needs to be updated. */
215 bool fUpdateTscOffsetting;
216 /** Whether the TSC_AUX MSR needs restoring on #VMEXIT. */
217 bool fRestoreTscAuxMsr;
218 /** Whether the #VMEXIT was caused by a page-fault during delivery of a
219 * contributory exception or a page-fault. */
220 bool fVectoringDoublePF;
221 /** Whether the #VMEXIT was caused by a page-fault during delivery of an
222 * external interrupt or NMI. */
223 bool fVectoringPF;
224} SVMTRANSIENT, *PSVMTRANSIENT;
225AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
226AssertCompileMemberAlignment(SVMTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
227/** @} */
228
229/**
230 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
231 */
232typedef enum SVMMSREXITREAD
233{
234 /** Reading this MSR causes a #VMEXIT. */
235 SVMMSREXIT_INTERCEPT_READ = 0xb,
236 /** Reading this MSR does not cause a #VMEXIT. */
237 SVMMSREXIT_PASSTHRU_READ
238} SVMMSREXITREAD;
239
240/**
241 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
242 */
243typedef enum SVMMSREXITWRITE
244{
245 /** Writing to this MSR causes a #VMEXIT. */
246 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
247 /** Writing to this MSR does not cause a #VMEXIT. */
248 SVMMSREXIT_PASSTHRU_WRITE
249} SVMMSREXITWRITE;
250
251/**
252 * SVM #VMEXIT handler.
253 *
254 * @returns VBox status code.
255 * @param pVCpu Pointer to the VMCPU.
256 * @param pCtx Pointer to the guest-CPU context.
257 * @param pSvmTransient Pointer to the SVM-transient structure.
258 */
259typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
260
261/*******************************************************************************
262* Internal Functions *
263*******************************************************************************/
264static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
265static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
266static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
267
268/** @name #VMEXIT handlers.
269 * @{
270 */
271static FNSVMEXITHANDLER hmR0SvmExitIntr;
272static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
273static FNSVMEXITHANDLER hmR0SvmExitInvd;
274static FNSVMEXITHANDLER hmR0SvmExitCpuid;
275static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
276static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
277static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
278static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
279static FNSVMEXITHANDLER hmR0SvmExitHlt;
280static FNSVMEXITHANDLER hmR0SvmExitMonitor;
281static FNSVMEXITHANDLER hmR0SvmExitMwait;
282static FNSVMEXITHANDLER hmR0SvmExitShutdown;
283static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
284static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
285static FNSVMEXITHANDLER hmR0SvmExitSetPendingXcptUD;
286static FNSVMEXITHANDLER hmR0SvmExitMsr;
287static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
288static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
289static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
290static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
291static FNSVMEXITHANDLER hmR0SvmExitVIntr;
292static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
293static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
294static FNSVMEXITHANDLER hmR0SvmExitIret;
295static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
296static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
297static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
298static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
299/** @} */
300
301DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
302
303/*******************************************************************************
304* Global Variables *
305*******************************************************************************/
306/** Ring-0 memory object for the IO bitmap. */
307RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
308/** Physical address of the IO bitmap. */
309RTHCPHYS g_HCPhysIOBitmap = 0;
310/** Virtual address of the IO bitmap. */
311R0PTRTYPE(void *) g_pvIOBitmap = NULL;
312
313
314/**
315 * Sets up and activates AMD-V on the current CPU.
316 *
317 * @returns VBox status code.
318 * @param pCpu Pointer to the CPU info struct.
319 * @param pVM Pointer to the VM (can be NULL after a resume!).
320 * @param pvCpuPage Pointer to the global CPU page.
321 * @param HCPhysCpuPage Physical address of the global CPU page.
322 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
323 * @param pvArg Unused on AMD-V.
324 */
325VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
326 void *pvArg)
327{
328 Assert(!fEnabledByHost);
329 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
330 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
331 Assert(pvCpuPage);
332 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
333
334 NOREF(pvArg);
335 NOREF(fEnabledByHost);
336
337 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
338 RTCCUINTREG uEflags = ASMIntDisableFlags();
339
340 /*
341 * We must turn on AMD-V and set up the host state physical address, as those MSRs are per CPU.
342 */
343 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
344 if (u64HostEfer & MSR_K6_EFER_SVME)
345 {
346 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE setting is active, we blindly use AMD-V anyway. */
347 if ( pVM
348 && pVM->hm.s.svm.fIgnoreInUseError)
349 {
350 pCpu->fIgnoreAMDVInUseError = true;
351 }
352
353 if (!pCpu->fIgnoreAMDVInUseError)
354 {
355 ASMSetFlags(uEflags);
356 return VERR_SVM_IN_USE;
357 }
358 }
359
360 /* Turn on AMD-V in the EFER MSR. */
361 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
362
363 /* Write the physical page address where the CPU will store the host state while executing the VM. */
364 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
365
366 /* Restore interrupts. */
367 ASMSetFlags(uEflags);
368
369 /*
370 * Theoretically, other hypervisors may have used ASIDs, so ideally we should flush all non-zero ASIDs
371 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
372 * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
373 * to flush the TLB before using a new ASID.
374 */
375 pCpu->fFlushAsidBeforeUse = true;
376
377 /*
378 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
379 */
380 ++pCpu->cTlbFlushes;
381
382 return VINF_SUCCESS;
383}
384
385
386/**
387 * Deactivates AMD-V on the current CPU.
388 *
389 * @returns VBox status code.
390 * @param pCpu Pointer to the CPU info struct.
391 * @param pvCpuPage Pointer to the global CPU page.
392 * @param HCPhysCpuPage Physical address of the global CPU page.
393 */
394VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
395{
396 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
397 AssertReturn( HCPhysCpuPage
398 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
399 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
400 NOREF(pCpu);
401
402 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
403 RTCCUINTREG uEflags = ASMIntDisableFlags();
404
405 /* Turn off AMD-V in the EFER MSR. */
406 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
407 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
408
409 /* Invalidate host state physical address. */
410 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
411
412 /* Restore interrupts. */
413 ASMSetFlags(uEflags);
414
415 return VINF_SUCCESS;
416}
417
418
419/**
420 * Does global AMD-V initialization (called during module initialization).
421 *
422 * @returns VBox status code.
423 */
424VMMR0DECL(int) SVMR0GlobalInit(void)
425{
426 /*
427 * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
428 * once globally here instead of per-VM.
429 */
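    /* The IOPM is 12 KB (three contiguous pages): one intercept bit per I/O port
       for ports 0x0000-0xFFFF, rounded up to whole pages with room for accesses
       that straddle the last port. */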
430 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
431 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
432 if (RT_FAILURE(rc))
433 return rc;
434
435 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
436 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
437
438 /* Set all bits to intercept all IO accesses. */
439 ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
440 return VINF_SUCCESS;
441}
442
443
444/**
445 * Does global AMD-V termination (called during module termination).
446 */
447VMMR0DECL(void) SVMR0GlobalTerm(void)
448{
449 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
450 {
451 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
452 g_pvIOBitmap = NULL;
453 g_HCPhysIOBitmap = 0;
454 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
455 }
456}
457
458
459/**
460 * Frees any allocated per-VCPU structures for a VM.
461 *
462 * @param pVM Pointer to the VM.
463 */
464DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
465{
466 for (uint32_t i = 0; i < pVM->cCpus; i++)
467 {
468 PVMCPU pVCpu = &pVM->aCpus[i];
469 AssertPtr(pVCpu);
470
471 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
472 {
473 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
474 pVCpu->hm.s.svm.pvVmcbHost = 0;
475 pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
476 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
477 }
478
479 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
480 {
481 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
482 pVCpu->hm.s.svm.pvVmcb = 0;
483 pVCpu->hm.s.svm.HCPhysVmcb = 0;
484 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
485 }
486
487 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
488 {
489 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
490 pVCpu->hm.s.svm.pvMsrBitmap = 0;
491 pVCpu->hm.s.svm.HCPhysMsrBitmap = 0;
492 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
493 }
494 }
495}
496
497
498/**
499 * Does per-VM AMD-V initialization.
500 *
501 * @returns VBox status code.
502 * @param pVM Pointer to the VM.
503 */
504VMMR0DECL(int) SVMR0InitVM(PVM pVM)
505{
506 int rc = VERR_INTERNAL_ERROR_5;
507
508 /*
509 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
510 */
511 uint32_t u32Family;
512 uint32_t u32Model;
513 uint32_t u32Stepping;
514 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
515 {
516 Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
517 pVM->hm.s.svm.fAlwaysFlushTLB = true;
518 }
519
520 /*
521 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
522 */
523 for (VMCPUID i = 0; i < pVM->cCpus; i++)
524 {
525 PVMCPU pVCpu = &pVM->aCpus[i];
526 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
527 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
528 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
529 }
530
531 for (VMCPUID i = 0; i < pVM->cCpus; i++)
532 {
533 PVMCPU pVCpu = &pVM->aCpus[i];
534
535 /*
536 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
537 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
538 */
539 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
540 if (RT_FAILURE(rc))
541 goto failure_cleanup;
542
543 pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
544 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
545 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
546 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
547
548 /*
549 * Allocate one page for the guest-state VMCB.
550 */
551 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
552 if (RT_FAILURE(rc))
553 goto failure_cleanup;
554
555 pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
556 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
557 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
558 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
559
560 /*
561 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
562 * SVM to not require one.
563 */
564 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
565 if (RT_FAILURE(rc))
566 goto failure_cleanup;
567
568 pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
569 pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
570 /* Set all bits to intercept all MSR accesses (changed later on). */
571 ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, UINT32_C(0xffffffff));
572 }
573
574 return VINF_SUCCESS;
575
576failure_cleanup:
577 hmR0SvmFreeStructs(pVM);
578 return rc;
579}
580
581
582/**
583 * Does per-VM AMD-V termination.
584 *
585 * @returns VBox status code.
586 * @param pVM Pointer to the VM.
587 */
588VMMR0DECL(int) SVMR0TermVM(PVM pVM)
589{
590 hmR0SvmFreeStructs(pVM);
591 return VINF_SUCCESS;
592}
593
594
595/**
596 * Sets the permission bits for the specified MSR in the MSRPM.
597 *
598 * @param pVCpu Pointer to the VMCPU.
599 * @param uMsr The MSR for which the access permissions are being set.
600 * @param enmRead MSR read permissions.
601 * @param enmWrite MSR write permissions.
602 */
603static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
604{
605 unsigned ulBit;
606 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
607
608 /*
609 * Layout:
610 * Byte offset MSR range
611 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
612 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
613 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
614 * 0x1800 - 0x1fff Reserved
615 */
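    /* For example, MSR_K8_LSTAR (0xC0000082) falls into the second range: the
       bitmap pointer is advanced by 0x800 and ulBit = (0xC0000082 - 0xC0000000) * 2
       = 0x104, i.e. its read-intercept bit is bit 4 of the byte at absolute offset
       0x820 and its write-intercept bit is the one immediately following it. */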
616 if (uMsr <= 0x00001FFF)
617 {
618 /* Pentium-compatible MSRs. */
619 ulBit = uMsr * 2;
620 }
621 else if ( uMsr >= 0xC0000000
622 && uMsr <= 0xC0001FFF)
623 {
624 /* AMD Sixth Generation x86 Processor MSRs. */
625 ulBit = (uMsr - 0xC0000000) * 2;
626 pbMsrBitmap += 0x800;
627 }
628 else if ( uMsr >= 0xC0010000
629 && uMsr <= 0xC0011FFF)
630 {
631 /* AMD Seventh and Eighth Generation Processor MSRs. */
632 ulBit = (uMsr - 0xC0010000) * 2;
633 pbMsrBitmap += 0x1000;
634 }
635 else
636 {
637 AssertFailed();
638 return;
639 }
640
641 Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
642 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
643 ASMBitSet(pbMsrBitmap, ulBit);
644 else
645 ASMBitClear(pbMsrBitmap, ulBit);
646
647 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
648 ASMBitSet(pbMsrBitmap, ulBit + 1);
649 else
650 ASMBitClear(pbMsrBitmap, ulBit + 1);
651
652 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
653 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
654}
655
656
657/**
658 * Sets up AMD-V for the specified VM.
659 * This function is only called once per VM during initialization.
660 *
661 * @returns VBox status code.
662 * @param pVM Pointer to the VM.
663 */
664VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
665{
666 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
667 AssertReturn(pVM, VERR_INVALID_PARAMETER);
668 Assert(pVM->hm.s.svm.fSupported);
669
670 for (VMCPUID i = 0; i < pVM->cCpus; i++)
671 {
672 PVMCPU pVCpu = &pVM->aCpus[i];
673 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
674
675 AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
676
677 /* Initialize the #VMEXIT history array with end-of-array markers (UINT16_MAX). */
678 Assert(!pVCpu->hm.s.idxExitHistoryFree);
679 HMCPU_EXIT_HISTORY_RESET(pVCpu);
680
681 /* Trap exceptions unconditionally (debug purposes). */
682#ifdef HMSVM_ALWAYS_TRAP_PF
683 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
684#endif
685#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
686 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
687 pVmcb->ctrl.u32InterceptException |= 0
688 | RT_BIT(X86_XCPT_BP)
689 | RT_BIT(X86_XCPT_DB)
690 | RT_BIT(X86_XCPT_DE)
691 | RT_BIT(X86_XCPT_NM)
692 | RT_BIT(X86_XCPT_UD)
693 | RT_BIT(X86_XCPT_NP)
694 | RT_BIT(X86_XCPT_SS)
695 | RT_BIT(X86_XCPT_GP)
696 | RT_BIT(X86_XCPT_PF)
697 | RT_BIT(X86_XCPT_MF)
698 ;
699#endif
700
701 /* Set up unconditional intercepts and conditions. */
702 pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR /* External interrupt causes a #VMEXIT. */
703 | SVM_CTRL1_INTERCEPT_NMI /* Non-maskable interrupts cause a #VMEXIT. */
704 | SVM_CTRL1_INTERCEPT_INIT /* INIT signal causes a #VMEXIT. */
705 | SVM_CTRL1_INTERCEPT_RDPMC /* RDPMC causes a #VMEXIT. */
706 | SVM_CTRL1_INTERCEPT_CPUID /* CPUID causes a #VMEXIT. */
707 | SVM_CTRL1_INTERCEPT_RSM /* RSM causes a #VMEXIT. */
708 | SVM_CTRL1_INTERCEPT_HLT /* HLT causes a #VMEXIT. */
709 | SVM_CTRL1_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO #VMEXITs. */
710 | SVM_CTRL1_INTERCEPT_MSR_SHADOW /* MSR access not covered by MSRPM causes a #VMEXIT.*/
711 | SVM_CTRL1_INTERCEPT_INVLPGA /* INVLPGA causes a #VMEXIT. */
712 | SVM_CTRL1_INTERCEPT_SHUTDOWN /* Shutdown events cause a #VMEXIT. */
713 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Intercept "freezing" during legacy FPU handling. */
714
715 pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* VMRUN causes a #VMEXIT. */
716 | SVM_CTRL2_INTERCEPT_VMMCALL /* VMMCALL causes a #VMEXIT. */
717 | SVM_CTRL2_INTERCEPT_VMLOAD /* VMLOAD causes a #VMEXIT. */
718 | SVM_CTRL2_INTERCEPT_VMSAVE /* VMSAVE causes a #VMEXIT. */
719 | SVM_CTRL2_INTERCEPT_STGI /* STGI causes a #VMEXIT. */
720 | SVM_CTRL2_INTERCEPT_CLGI /* CLGI causes a #VMEXIT. */
721 | SVM_CTRL2_INTERCEPT_SKINIT /* SKINIT causes a #VMEXIT. */
722 | SVM_CTRL2_INTERCEPT_WBINVD /* WBINVD causes a #VMEXIT. */
723 | SVM_CTRL2_INTERCEPT_MONITOR /* MONITOR causes a #VMEXIT. */
724 | SVM_CTRL2_INTERCEPT_MWAIT; /* MWAIT causes a #VMEXIT. */
725
726 /* CR0, CR4 reads must be intercepted; our shadow values are not necessarily the same as the guest's. */
727 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
728
729 /* CR0, CR4 writes must be intercepted for the same reasons as above. */
730 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
731
732 /* Intercept all DRx reads and writes by default. Changed later on. */
733 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
734 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
735
736 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
737 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
738
739 /* Ignore the priority in the TPR. This is necessary for delivering PIC style (ExtInt) interrupts and we currently
740 deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
741 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;
742
743 /* Set IO and MSR bitmap permission bitmap physical addresses. */
744 pVmcb->ctrl.u64IOPMPhysAddr = g_HCPhysIOBitmap;
745 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
746
747 /* No LBR virtualization. */
748 pVmcb->ctrl.u64LBRVirt = 0;
749
750 /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from the VMCB in memory. */
751 pVmcb->ctrl.u64VmcbCleanBits = 0;
752
753 /* ASID 0 is reserved for the host, so start the guest with ASID 1. */
754 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
755
756 /*
757 * Set up the PAT MSR (applicable for Nested Paging only).
758 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
759 * so choose type 6 for all PAT slots.
760 */
761 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
762
763 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
764 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
765
766 /* Without Nested Paging, we need additional intercepts. */
767 if (!pVM->hm.s.fNestedPaging)
768 {
769 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
770 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
771 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
772
773 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
774 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
775 | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
776
777 /* Page faults must be intercepted to implement shadow paging. */
778 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
779 }
780
781#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
782 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
783#endif
784
785 /*
786 * The following MSRs are saved/restored automatically during the world-switch.
787 * Don't intercept guest read/write accesses to these MSRs.
788 */
789 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
790 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
791 hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
792 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
793 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
794 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
795 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
796 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
797 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
798 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
799 }
800
801 return VINF_SUCCESS;
802}
803
804
805/**
806 * Invalidates a guest page by guest virtual address.
807 *
808 * @returns VBox status code.
809 * @param pVM Pointer to the VM.
810 * @param pVCpu Pointer to the VMCPU.
811 * @param GCVirt Guest virtual address of the page to invalidate.
812 */
813VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
814{
815 AssertReturn(pVM, VERR_INVALID_PARAMETER);
816 Assert(pVM->hm.s.svm.fSupported);
817
818 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
819
820 /* Skip it if a TLB flush is already pending. */
821 if (!fFlushPending)
822 {
823 Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
824
825 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
826 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
827
828#if HC_ARCH_BITS == 32
829 /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
830 if (CPUMIsGuestInLongMode(pVCpu))
831 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
832 else
833#endif
834 {
835 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
836 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
837 }
838 }
839 return VINF_SUCCESS;
840}
841
842
843/**
844 * Flushes the appropriate tagged-TLB entries.
845 *
847 * @param pVCpu Pointer to the VMCPU.
848 */
849static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
850{
851 PVM pVM = pVCpu->CTX_SUFF(pVM);
852 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
853 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
854
855 /*
856 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
857 * This can happen both for start & resume due to long jumps back to ring-3.
858 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
859 * so we cannot reuse the ASIDs without flushing.
860 */
861 bool fNewAsid = false;
862 Assert(pCpu->idCpu != NIL_RTCPUID);
863 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
864 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
865 {
866 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
867 pVCpu->hm.s.fForceTLBFlush = true;
868 fNewAsid = true;
869 }
870
871 /* Set TLB flush state as checked until we return from the world switch. */
872 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
873
874 /* Check for explicit TLB shootdowns. */
875 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
876 {
877 pVCpu->hm.s.fForceTLBFlush = true;
878 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
879 }
880
881 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
882
883 if (pVM->hm.s.svm.fAlwaysFlushTLB)
884 {
885 /*
886 * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
887 */
888 pCpu->uCurrentAsid = 1;
889 pVCpu->hm.s.uCurrentAsid = 1;
890 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
891 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
892
893 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
894 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
895 }
896 else if (pVCpu->hm.s.fForceTLBFlush)
897 {
898 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
899 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
900
901 if (fNewAsid)
902 {
903 ++pCpu->uCurrentAsid;
904 bool fHitASIDLimit = false;
905 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
906 {
907 pCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
908 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
909 fHitASIDLimit = true;
910
911 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
912 {
913 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
914 pCpu->fFlushAsidBeforeUse = true;
915 }
916 else
917 {
918 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
919 pCpu->fFlushAsidBeforeUse = false;
920 }
921 }
922
923 if ( !fHitASIDLimit
924 && pCpu->fFlushAsidBeforeUse)
925 {
926 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
927 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
928 else
929 {
930 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
931 pCpu->fFlushAsidBeforeUse = false;
932 }
933 }
934
935 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
936 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
937 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
938 }
939 else
940 {
941 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
942 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
943 else
944 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
945 }
946
947 pVCpu->hm.s.fForceTLBFlush = false;
948 }
949 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
950 * not be executed. See hmQueueInvlPage() where it is commented
951 * out. Support individual entry flushing someday. */
952#if 0
953 else
954 {
955 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
956 {
957 /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
958 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
959 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
960 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
961
962 pVCpu->hm.s.TlbShootdown.cPages = 0;
963 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
964 }
965 }
966#endif
967
968
969 /* Update VMCB with the ASID. */
970 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
971 {
972 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
973 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
974 }
975
976 AssertMsg(pVCpu->hm.s.idLastCpu == pCpu->idCpu,
977 ("vcpu idLastCpu=%x pcpu idCpu=%x\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
978 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
979 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
980 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
981 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
982 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
983 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
984
985#ifdef VBOX_WITH_STATISTICS
986 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
987 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
988 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
989 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
990 {
991 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
992 }
993 else
994 {
995 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
996 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
997 }
998#endif
999}
1000
1001
1002/** @name 64-bit guest on 32-bit host OS helper functions.
1003 *
1004 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
1005 * mode (code segment, paging). These wrappers/helpers perform the necessary
1006 * bits for the 32->64 switcher.
1007 *
1008 * @{ */
1009#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1010/**
1011 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
1012 *
1013 * @returns VBox status code.
1014 * @param HCPhysVmcbHost Physical address of host VMCB.
1015 * @param HCPhysVmcb Physical address of the VMCB.
1016 * @param pCtx Pointer to the guest-CPU context.
1017 * @param pVM Pointer to the VM.
1018 * @param pVCpu Pointer to the VMCPU.
1019 */
1020DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
1021{
1022 uint32_t aParam[4];
1023 aParam[0] = (uint32_t)(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
1024 aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32); /* Param 1: HCPhysVmcbHost - Hi. */
1025 aParam[2] = (uint32_t)(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
1026 aParam[3] = (uint32_t)(HCPhysVmcb >> 32); /* Param 2: HCPhysVmcb - Hi. */
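    /* Each 64-bit physical address is split into two 32-bit halves because the
       32->64 switcher parameters are passed as 32-bit values (they are pushed one
       by one via CPUMPushHyper in SVMR0Execute64BitsHandler below). */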
1027
1028 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
1029}
1030
1031
1032/**
1033 * Executes the specified VMRUN handler in 64-bit mode.
1034 *
1035 * @returns VBox status code.
1036 * @param pVM Pointer to the VM.
1037 * @param pVCpu Pointer to the VMCPU.
1038 * @param pCtx Pointer to the guest-CPU context.
1039 * @param enmOp The operation to perform.
1040 * @param cbParam Number of parameters.
1041 * @param paParam Array of 32-bit parameters.
1042 */
1043VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
1044 uint32_t *paParam)
1045{
1046 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
1047 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
1048
1049 /* Disable interrupts. */
1050 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
1051
1052#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1053 RTCPUID idHostCpu = RTMpCpuId();
1054 CPUMR0SetLApic(pVCpu, idHostCpu);
1055#endif
1056
1057 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
1058 CPUMSetHyperEIP(pVCpu, enmOp);
1059 for (int i = (int)cbParam - 1; i >= 0; i--)
1060 CPUMPushHyper(pVCpu, paParam[i]);
1061
1062 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
1063 /* Call the switcher. */
1064 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
1065 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
1066
1067 /* Restore interrupts. */
1068 ASMSetFlags(uOldEFlags);
1069 return rc;
1070}
1071
1072#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
1073/** @} */
1074
1075
1076/**
1077 * Adds an exception to the intercept exception bitmap in the VMCB and updates
1078 * the corresponding VMCB Clean bit.
1079 *
1080 * @param pVmcb Pointer to the VM control block.
1081 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1082 */
1083DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1084{
1085 if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
1086 {
1087 pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
1088 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1089 }
1090}
1091
1092
1093/**
1094 * Removes an exception from the intercept-exception bitmap in the VMCB and
1095 * updates the corresponding VMCB Clean bit.
1096 *
1097 * @param pVmcb Pointer to the VM control block.
1098 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1099 */
1100DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1101{
1102#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1103 if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
1104 {
1105 pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
1106 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1107 }
1108#endif
1109}
1110
1111
1112/**
1113 * Loads the guest CR0 control register into the guest-state area in the VMCB.
1114 * Although the guest CR0 is a separate field in the VMCB, we have to consider
1115 * the FPU state itself, which is shared between the host and the guest.
1116 *
1117 * @param pVCpu Pointer to the VMCPU.
1119 * @param pVmcb Pointer to the VM control block.
1120 * @param pCtx Pointer to the guest-CPU context.
1121 *
1122 * @remarks No-long-jump zone!!!
1123 */
1124static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1125{
1126 /*
1127 * Guest CR0.
1128 */
1129 PVM pVM = pVCpu->CTX_SUFF(pVM);
1130 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
1131 {
1132 uint64_t u64GuestCR0 = pCtx->cr0;
1133
1134 /* Always enable caching. */
1135 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
1136
1137 /*
1138 * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
1139 */
1140 if (!pVM->hm.s.fNestedPaging)
1141 {
1142 u64GuestCR0 |= X86_CR0_PG; /* When Nested Paging is not available, use shadow page tables. */
1143 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1144 }
1145
1146 /*
1147 * Guest FPU bits.
1148 */
1149 bool fInterceptNM = false;
1150 bool fInterceptMF = false;
1151 u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
1152 if (CPUMIsGuestFPUStateActive(pVCpu))
1153 {
1154 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1155 if (!(u64GuestCR0 & X86_CR0_NE))
1156 {
1157 Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
1158 fInterceptMF = true;
1159 }
1160 }
1161 else
1162 {
1163 fInterceptNM = true; /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
1164 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
1165 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
1166 }
1167
1168 /*
1169 * Update the exception intercept bitmap.
1170 */
1171 if (fInterceptNM)
1172 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
1173 else
1174 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
1175
1176 if (fInterceptMF)
1177 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
1178 else
1179 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
1180
1181 pVmcb->guest.u64CR0 = u64GuestCR0;
1182 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1183 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
1184 }
1185}
1186
1187
1188/**
1189 * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
1190 *
1191 * @returns VBox status code.
1192 * @param pVCpu Pointer to the VMCPU.
1193 * @param pVmcb Pointer to the VM control block.
1194 * @param pCtx Pointer to the guest-CPU context.
1195 *
1196 * @remarks No-long-jump zone!!!
1197 */
1198static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1199{
1200 PVM pVM = pVCpu->CTX_SUFF(pVM);
1201
1202 /*
1203 * Guest CR2.
1204 */
1205 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
1206 {
1207 pVmcb->guest.u64CR2 = pCtx->cr2;
1208 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1209 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
1210 }
1211
1212 /*
1213 * Guest CR3.
1214 */
1215 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
1216 {
1217 if (pVM->hm.s.fNestedPaging)
1218 {
1219 PGMMODE enmShwPagingMode;
1220#if HC_ARCH_BITS == 32
1221 if (CPUMIsGuestInLongModeEx(pCtx))
1222 enmShwPagingMode = PGMMODE_AMD64_NX;
1223 else
1224#endif
1225 enmShwPagingMode = PGMGetHostMode(pVM);
1226
1227 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
1228 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1229 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1230 pVmcb->guest.u64CR3 = pCtx->cr3;
1231 }
1232 else
1233 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1234
1235 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1236 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
1237 }
1238
1239 /*
1240 * Guest CR4.
1241 */
1242 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
1243 {
1244 uint64_t u64GuestCR4 = pCtx->cr4;
1245 if (!pVM->hm.s.fNestedPaging)
1246 {
1247 switch (pVCpu->hm.s.enmShadowMode)
1248 {
1249 case PGMMODE_REAL:
1250 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1251 AssertFailed();
1252 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1253
1254 case PGMMODE_32_BIT: /* 32-bit paging. */
1255 u64GuestCR4 &= ~X86_CR4_PAE;
1256 break;
1257
1258 case PGMMODE_PAE: /* PAE paging. */
1259 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1260 /** Must use PAE paging as we could use physical memory > 4 GB */
1261 u64GuestCR4 |= X86_CR4_PAE;
1262 break;
1263
1264 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1265 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1266#ifdef VBOX_ENABLE_64_BITS_GUESTS
1267 break;
1268#else
1269 AssertFailed();
1270 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1271#endif
1272
1273 default: /* shut up gcc */
1274 AssertFailed();
1275 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1276 }
1277 }
1278
1279 pVmcb->guest.u64CR4 = u64GuestCR4;
1280 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1281 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
1282 }
1283
1284 return VINF_SUCCESS;
1285}
1286
1287
1288/**
1289 * Loads the guest segment registers into the VMCB.
1290 *
1292 * @param pVCpu Pointer to the VMCPU.
1293 * @param pVmcb Pointer to the VM control block.
1294 * @param pCtx Pointer to the guest-CPU context.
1295 *
1296 * @remarks No-long-jump zone!!!
1297 */
1298static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1299{
1300 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
1301 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
1302 {
1303 HMSVM_LOAD_SEG_REG(CS, cs);
1304 HMSVM_LOAD_SEG_REG(SS, ss);
1305 HMSVM_LOAD_SEG_REG(DS, ds);
1306 HMSVM_LOAD_SEG_REG(ES, es);
1307 HMSVM_LOAD_SEG_REG(FS, fs);
1308 HMSVM_LOAD_SEG_REG(GS, gs);
1309
1310 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1311 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1312 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
1313 }
1314
1315 /* Guest TR. */
1316 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
1317 {
1318 HMSVM_LOAD_SEG_REG(TR, tr);
1319 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
1320 }
1321
1322 /* Guest LDTR. */
1323 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
1324 {
1325 HMSVM_LOAD_SEG_REG(LDTR, ldtr);
1326 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
1327 }
1328
1329 /* Guest GDTR. */
1330 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
1331 {
1332 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1333 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1334 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1335 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
1336 }
1337
1338 /* Guest IDTR. */
1339 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
1340 {
1341 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1342 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1343 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1344 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
1345 }
1346}
1347
1348
1349/**
1350 * Loads the guest MSRs into the VMCB.
1351 *
1352 * @param pVCpu Pointer to the VMCPU.
1353 * @param pVmcb Pointer to the VM control block.
1354 * @param pCtx Pointer to the guest-CPU context.
1355 *
1356 * @remarks No-long-jump zone!!!
1357 */
1358static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1359{
1360 /* Guest Sysenter MSRs. */
1361 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1362 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1363 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1364
1365 /*
1366 * Guest EFER MSR.
1367 * AMD-V requires guest EFER.SVME to be set. Weird.
1368 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1369 */
1370 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
1371 {
1372 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1373 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1374 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
1375 }
1376
1377 /* 64-bit MSRs. */
1378 if (CPUMIsGuestInLongModeEx(pCtx))
1379 {
1380 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1381 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1382 }
1383 else
1384 {
1385 /* If the guest isn't in 64-bit mode, clear the LME bit from the guest EFER, otherwise AMD-V expects amd64 shadow paging. */
1386 if (pCtx->msrEFER & MSR_K6_EFER_LME)
1387 {
1388 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1389 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1390 }
1391 }
1392
1393
1394 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
1395 * be writable in 32-bit mode. Clarify with AMD spec. */
1396 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1397 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1398 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1399 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1400 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1401}
1402
1403
1404/**
1405 * Loads the shared guest/host debug state (DR6, DR7) into the VMCB and programs
1406 * the necessary #DB and MOV DRx intercepts accordingly.
1407 *
1408 * @param pVCpu Pointer to the VMCPU.
1409 * @param pVmcb Pointer to the VM control block.
1410 * @param pCtx Pointer to the guest-CPU context.
1411 *
1412 * @remarks No-long-jump zone!!!
1413 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1414 */
1415static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1416{
1417 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
1418 return;
1419 Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
1420 Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
1421
1422 bool fInterceptDB = false;
1423 bool fInterceptMovDRx = false;
1424
1425 /*
1426 * Anyone single stepping on the host side? If so, we'll have to use the
1427 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1428 * the VMM level like the VT-x implementation does.
1429 */
1430 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1431 if (fStepping)
1432 {
1433 pVCpu->hm.s.fClearTrapFlag = true;
1434 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1435 fInterceptDB = true;
1436 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1437 }
1438
1439 if ( fStepping
1440 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1441 {
1442 /*
1443 * Use the combined guest and host DRx values found in the hypervisor
1444 * register set because the debugger has breakpoints active or someone
1445 * is single stepping on the host side.
1446 *
1447 * Note! DBGF expects a clean DR6 state before executing guest code.
1448 */
1449#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1450 if ( CPUMIsGuestInLongModeEx(pCtx)
1451 && !CPUMIsHyperDebugStateActivePending(pVCpu))
1452 {
1453 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1454 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
1455 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
1456 }
1457 else
1458#endif
1459 if (!CPUMIsHyperDebugStateActive(pVCpu))
1460 {
1461 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1462 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1463 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1464 }
1465
1466 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1467 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1468 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1469 {
1470 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1471 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1472 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1473 pVCpu->hm.s.fUsingHyperDR7 = true;
1474 }
1475
1476 /** @todo If we cared, we could optimize to allow the guest to read registers
1477 * with the same values. */
1478 fInterceptDB = true;
1479 fInterceptMovDRx = true;
1480 Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
1481 }
1482 else
1483 {
1484 /*
1485 * Update DR6, DR7 with the guest values if necessary.
1486 */
1487 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1488 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1489 {
1490 pVmcb->guest.u64DR7 = pCtx->dr[7];
1491 pVmcb->guest.u64DR6 = pCtx->dr[6];
1492 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1493 pVCpu->hm.s.fUsingHyperDR7 = false;
1494 }
1495
1496 /*
1497 * If the guest has enabled debug registers, we need to load them prior to
1498 * executing guest code so they'll trigger at the right time.
1499 */
1500 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1501 {
1502#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1503 if ( CPUMIsGuestInLongModeEx(pCtx)
1504 && !CPUMIsGuestDebugStateActivePending(pVCpu))
1505 {
1506 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1507 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1508 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
1509 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
1510 }
1511 else
1512#endif
1513 if (!CPUMIsGuestDebugStateActive(pVCpu))
1514 {
1515 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1516 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1517 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1518 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1519 }
1520 Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
1521 }
1522 /*
1523 * If no debugging is enabled, we'll lazily load DR0-DR3. We don't need to
1524 * intercept #DB as DR6 is updated in the VMCB.
1525 */
1526#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1527 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
1528 && !CPUMIsGuestDebugStateActive(pVCpu))
1529#else
1530 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1531#endif
1532 {
1533 fInterceptMovDRx = true;
1534 }
1535 }
1536
1537 /*
1538 * Set up the intercepts.
1539 */
1540 if (fInterceptDB)
1541 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
1542 else
1543 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
1544
1545 if (fInterceptMovDRx)
1546 {
1547 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1548 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1549 {
1550 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
1551 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
1552 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1553 }
1554 }
1555 else
1556 {
1557 if ( pVmcb->ctrl.u16InterceptRdDRx
1558 || pVmcb->ctrl.u16InterceptWrDRx)
1559 {
1560 pVmcb->ctrl.u16InterceptRdDRx = 0;
1561 pVmcb->ctrl.u16InterceptWrDRx = 0;
1562 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1563 }
1564 }
1565
1566 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
1567}
1568
1569
1570/**
1571 * Loads the guest APIC state (currently just the TPR).
1572 *
1573 * @returns VBox status code.
1574 * @param pVCpu Pointer to the VMCPU.
1575 * @param pVmcb Pointer to the VM control block.
1576 * @param pCtx Pointer to the guest-CPU context.
1577 */
1578static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1579{
1580 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
1581 return VINF_SUCCESS;
1582
1583 bool fPendingIntr;
1584 uint8_t u8Tpr;
1585 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
1586 AssertRCReturn(rc, rc);
1587
1588 /* Assume that we need to trap all TPR accesses and thus need not check on
1589 every #VMEXIT if we should update the TPR. */
1590 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
1591 pVCpu->hm.s.svm.fSyncVTpr = false;
1592
1593    /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
1594 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
1595 {
1596 pCtx->msrLSTAR = u8Tpr;
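        /* The patching trick, in brief (the actual patching is done by ring-3 code not shown here):
           for 32-bit guests the TPR-accessing instructions are patched to read/write the otherwise
           unused LSTAR MSR instead of the APIC TPR, so mirroring the TPR into pCtx->msrLSTAR and only
           intercepting LSTAR writes while an interrupt is pending gives us cheap TPR virtualization
           without trapping every TPR access. */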
1597
1598 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
1599 if (fPendingIntr)
1600 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
1601 else
1602 {
1603 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1604 pVCpu->hm.s.svm.fSyncVTpr = true;
1605 }
1606 }
1607 else
1608 {
1609 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
1610 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
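        /* Worked example of the mapping above: a guest TPR of 0xC0 (task-priority class 12) yields
           u8VTPR = 0xC0 >> 4 = 0x0C, i.e. only the 4-bit task-priority class ends up in the VMCB. */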
1611
1612 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
1613 if (fPendingIntr)
1614 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
1615 else
1616 {
1617 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
1618 pVCpu->hm.s.svm.fSyncVTpr = true;
1619 }
1620
1621 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
1622 }
1623
1624 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
1625 return rc;
1626}
1627
1628
1629/**
1630 * Sets up the appropriate function to run guest code.
1631 *
1632 * @returns VBox status code.
1633 * @param pVCpu Pointer to the VMCPU.
1634 * @param pCtx Pointer to the guest-CPU context.
1635 *
1636 * @remarks No-long-jump zone!!!
1637 */
1638static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
1639{
1640 if (CPUMIsGuestInLongModeEx(pCtx))
1641 {
1642#ifndef VBOX_ENABLE_64_BITS_GUESTS
1643 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1644#endif
1645 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
1646#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1647 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
1648 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
1649#else
1650 /* 64-bit host or hybrid host. */
1651 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
1652#endif
1653 }
1654 else
1655 {
1656 /* Guest is not in long mode, use the 32-bit handler. */
1657 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
1658 }
1659 return VINF_SUCCESS;
1660}
1661
1662
1663/**
1664 * Enters the AMD-V session.
1665 *
1666 * @returns VBox status code.
1667 * @param pVM Pointer to the VM.
1668 * @param pVCpu Pointer to the VMCPU.
1669 * @param pCpu Pointer to the CPU info struct.
1670 */
1671VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1672{
1673 AssertPtr(pVM);
1674 AssertPtr(pVCpu);
1675 Assert(pVM->hm.s.svm.fSupported);
1676 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1677 NOREF(pVM); NOREF(pCpu);
1678
1679 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1680 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1681
1682 pVCpu->hm.s.fLeaveDone = false;
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/**
1688 * Thread-context callback for AMD-V.
1689 *
1690 * @param enmEvent The thread-context event.
1691 * @param pVCpu Pointer to the VMCPU.
1692 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
1693 * @thread EMT(pVCpu)
1694 */
1695VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
1696{
1697 NOREF(fGlobalInit);
1698
1699 switch (enmEvent)
1700 {
1701 case RTTHREADCTXEVENT_PREEMPTING:
1702 {
1703 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1704 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
1705 VMCPU_ASSERT_EMT(pVCpu);
1706
1707 PVM pVM = pVCpu->CTX_SUFF(pVM);
1708 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1709
1710 /* No longjmps (log-flush, locks) in this fragile context. */
1711 VMMRZCallRing3Disable(pVCpu);
1712
1713 if (!pVCpu->hm.s.fLeaveDone)
1714 {
1715 hmR0SvmLeave(pVM, pVCpu, pCtx);
1716 pVCpu->hm.s.fLeaveDone = true;
1717 }
1718
1719 /* Leave HM context, takes care of local init (term). */
1720 int rc = HMR0LeaveCpu(pVCpu);
1721 AssertRC(rc); NOREF(rc);
1722
1723 /* Restore longjmp state. */
1724 VMMRZCallRing3Enable(pVCpu);
1725 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
1726 break;
1727 }
1728
1729 case RTTHREADCTXEVENT_RESUMED:
1730 {
1731 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1732 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
1733 VMCPU_ASSERT_EMT(pVCpu);
1734
1735 /* No longjmps (log-flush, locks) in this fragile context. */
1736 VMMRZCallRing3Disable(pVCpu);
1737
1738 /*
1739 * Initialize the bare minimum state required for HM. This takes care of
1740 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
1741 */
1742 int rc = HMR0EnterCpu(pVCpu);
1743 AssertRC(rc); NOREF(rc);
1744 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1745
1746 pVCpu->hm.s.fLeaveDone = false;
1747
1748 /* Restore longjmp state. */
1749 VMMRZCallRing3Enable(pVCpu);
1750 break;
1751 }
1752
1753 default:
1754 break;
1755 }
1756}
1757
1758
1759/**
1760 * Saves the host state.
1761 *
1762 * @returns VBox status code.
1763 * @param pVM Pointer to the VM.
1764 * @param pVCpu Pointer to the VMCPU.
1765 *
1766 * @remarks No-long-jump zone!!!
1767 */
1768VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
1769{
1770 NOREF(pVM);
1771 NOREF(pVCpu);
1772 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
1773 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
1774 return VINF_SUCCESS;
1775}
1776
1777
1778/**
1779 * Loads the guest state into the VMCB. The CPU state will be loaded from these
1780 * fields on every successful VM-entry.
1781 *
1782 * Also sets up the appropriate VMRUN function to execute guest code based on
1783 * the guest CPU mode.
1784 *
1785 * @returns VBox status code.
1786 * @param pVM Pointer to the VM.
1787 * @param pVCpu Pointer to the VMCPU.
1788 * @param pCtx Pointer to the guest-CPU context.
1789 *
1790 * @remarks No-long-jump zone!!!
1791 */
1792static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1793{
1794 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1795 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
1796
1797 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
1798
1799 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
1800 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1801
1802 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
1803 hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
1804
1805 pVmcb->guest.u64RIP = pCtx->rip;
1806 pVmcb->guest.u64RSP = pCtx->rsp;
1807 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
1808 pVmcb->guest.u64RAX = pCtx->rax;
1809
1810 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
1811 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1812
1813 rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
1814 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1815
1816 /* Clear any unused and reserved bits. */
1817 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */
1818 | HM_CHANGED_GUEST_RSP
1819 | HM_CHANGED_GUEST_RFLAGS
1820 | HM_CHANGED_GUEST_SYSENTER_CS_MSR
1821 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
1822 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
1823 | HM_CHANGED_GUEST_LAZY_MSRS /* Unused. */
1824 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */
1825 | HM_CHANGED_SVM_RESERVED2
1826 | HM_CHANGED_SVM_RESERVED3
1827 | HM_CHANGED_SVM_RESERVED4);
1828
1829 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
1830 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
1831 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
1832 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
1833
1834    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
1835 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
1836 return rc;
1837}
1838
1839
1840/**
1841 * Loads the state shared between the host and guest into the
1842 * VMCB.
1843 *
1844 * @param pVCpu Pointer to the VMCPU.
1845 * @param pVmcb Pointer to the VM control block.
1846 * @param pCtx Pointer to the guest-CPU context.
1847 *
1848 * @remarks No-long-jump zone!!!
1849 */
1850static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1851{
1852 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1853 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1854
1855 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
1856 hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
1857
1858 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
1859 hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
1860
1861 /* Unused on AMD-V. */
1862 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
1863
1864 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
1865 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
1866}
1867
1868
1869/**
1870 * Saves the entire guest state from the VMCB into the
1871 * guest-CPU context. Currently there is no residual state left in the CPU that
1872 * is not updated in the VMCB.
1873 *
1874 * @returns VBox status code.
1875 * @param pVCpu Pointer to the VMCPU.
1876 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1877 * out-of-sync. Make sure to update the required fields
1878 * before using them.
1879 */
1880static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1881{
1882 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1883
1884 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1885
1886 pMixedCtx->rip = pVmcb->guest.u64RIP;
1887 pMixedCtx->rsp = pVmcb->guest.u64RSP;
1888 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
1889 pMixedCtx->rax = pVmcb->guest.u64RAX;
1890
1891 /*
1892 * Guest interrupt shadow.
1893 */
1894 if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1895 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
1896 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1897 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1898
1899 /*
1900 * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
1901 */
1902 pMixedCtx->cr2 = pVmcb->guest.u64CR2;
1903
1904 /*
1905 * Guest MSRs.
1906 */
1907 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */
1908 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */
1909 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */
1910 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */
1911 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */
1912 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
1913 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
1914 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
1915
1916 /*
1917 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
1918 */
1919 HMSVM_SAVE_SEG_REG(CS, cs);
1920 HMSVM_SAVE_SEG_REG(SS, ss);
1921 HMSVM_SAVE_SEG_REG(DS, ds);
1922 HMSVM_SAVE_SEG_REG(ES, es);
1923 HMSVM_SAVE_SEG_REG(FS, fs);
1924 HMSVM_SAVE_SEG_REG(GS, gs);
1925
1926 /*
1927 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
1928 * register (yet).
1929 */
1930 /** @todo SELM might need to be fixed as it too should not care about the
1931 * granularity bit. See @bugref{6785}. */
1932 if ( !pMixedCtx->cs.Attr.n.u1Granularity
1933 && pMixedCtx->cs.Attr.n.u1Present
1934 && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
1935 {
1936 Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
1937 pMixedCtx->cs.Attr.n.u1Granularity = 1;
1938 }
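    /* Rationale for the fixup above: with a byte-granular descriptor (G=0) the expanded limit can be at
       most 0xFFFFF, so any limit above that implies a page-granular descriptor, in which case the low 12
       bits of the expanded limit always read as 0xFFF -- hence the Assert and forcing G=1 here. */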
1939
1940#ifdef VBOX_STRICT
1941# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
1942 AssertMsg( !pMixedCtx->reg.Attr.n.u1Present \
1943 || ( pMixedCtx->reg.Attr.n.u1Granularity \
1944 ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
1945 : pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
1946 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
1947 pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
1948
1949 HMSVM_ASSERT_SEG_GRANULARITY(cs);
1950 HMSVM_ASSERT_SEG_GRANULARITY(ss);
1951 HMSVM_ASSERT_SEG_GRANULARITY(ds);
1952 HMSVM_ASSERT_SEG_GRANULARITY(es);
1953 HMSVM_ASSERT_SEG_GRANULARITY(fs);
1954 HMSVM_ASSERT_SEG_GRANULARITY(gs);
1955
1956# undef HMSVM_ASSERT_SEG_GRANULARITY
1957#endif
1958
1959 /*
1960     * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that, so it's
1961     * possible that the SS DPL isn't updated by AMD-V when the CPL changes during guest execution.
1962     * Observed on some AMD Fusion CPUs with 64-bit guests.
1963     * See AMD spec. 15.5.1 "Basic operation".
1964 */
1965 Assert(!(pVmcb->guest.u8CPL & ~0x3));
1966 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
1967
1968 /*
1969 * Guest TR.
1970     * Fix up the TR attributes so they're compatible with Intel. Important when saved-states are used
1971 * between Intel and AMD. See @bugref{6208} comment #39.
1972 */
1973 HMSVM_SAVE_SEG_REG(TR, tr);
1974 if (CPUMIsGuestInLongModeEx(pMixedCtx))
1975 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1976
1977 /*
1978 * Guest Descriptor-Table registers.
1979 */
1980 HMSVM_SAVE_SEG_REG(LDTR, ldtr);
1981 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
1982 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base;
1983
1984 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
1985 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base;
1986
1987 /*
1988 * Guest Debug registers.
1989 */
1990 if (!pVCpu->hm.s.fUsingHyperDR7)
1991 {
1992 pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
1993 pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
1994 }
1995 else
1996 {
1997 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
1998 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
1999 }
2000
2001 /*
2002     * With Nested Paging, CR3 changes are not intercepted. Therefore, sync it now.
2003     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmps to ring-3.
2004 */
2005 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
2006 && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
2007 {
2008 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
2009 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
2010 }
2011}
2012
2013
2014/**
2015 * Does the necessary state syncing before returning to ring-3 for any reason
2016 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2017 *
2018 * @param pVM Pointer to the VM.
2019 * @param pVCpu Pointer to the VMCPU.
2020 * @param   pCtx        Pointer to the guest-CPU context.
2021 *
2022 * @remarks No-long-jmp zone!!!
2023 */
2024static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2025{
2026 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2027 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2028 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2029
2030 /*
2031 * !!! IMPORTANT !!!
2032 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2033 */
2034
2035    /* Restore host FPU state if necessary and resync on next R0 reentry. */
2036 if (CPUMIsGuestFPUStateActive(pVCpu))
2037 {
2038 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
2039 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2040 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
2041 }
2042
2043 /*
2044 * Restore host debug registers if necessary and resync on next R0 reentry.
2045 */
2046#ifdef VBOX_STRICT
2047 if (CPUMIsHyperDebugStateActive(pVCpu))
2048 {
2049 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2050 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2051 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2052 }
2053#endif
2054 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
2055 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
2056
2057 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2058 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2059
2060 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
2061 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
2062 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
2063 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
2064 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2065
2066 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
2067}
2068
2069
2070/**
2071 * Leaves the AMD-V session.
2072 *
2073 * @returns VBox status code.
2074 * @param pVM Pointer to the VM.
2075 * @param pVCpu Pointer to the VMCPU.
2076 * @param pCtx Pointer to the guest-CPU context.
2077 */
2078static int hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2079{
2080 HM_DISABLE_PREEMPT_IF_NEEDED();
2081 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2082 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2083
2084    /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
2085       and already done it from the SVMR0ThreadCtxCallback(). */
2086 if (!pVCpu->hm.s.fLeaveDone)
2087 {
2088 hmR0SvmLeave(pVM, pVCpu, pCtx);
2089 pVCpu->hm.s.fLeaveDone = true;
2090 }
2091
2092 /*
2093 * !!! IMPORTANT !!!
2094 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2095 */
2096
2097 /* Deregister hook now that we've left HM context before re-enabling preemption. */
2098 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
2099 VMMR0ThreadCtxHooksDeregister(pVCpu);
2100
2101 /* Leave HM context. This takes care of local init (term). */
2102 int rc = HMR0LeaveCpu(pVCpu);
2103
2104 HM_RESTORE_PREEMPT_IF_NEEDED();
2105 return rc;
2106}
2107
2108
2109/**
2110 * Does the necessary state syncing before doing a longjmp to ring-3.
2111 *
2112 * @returns VBox status code.
2113 * @param pVM Pointer to the VM.
2114 * @param pVCpu Pointer to the VMCPU.
2115 * @param pCtx Pointer to the guest-CPU context.
2116 *
2117 * @remarks No-long-jmp zone!!!
2118 */
2119static int hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2120{
2121 return hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
2122}
2123
2124
2125/**
2126 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
2127 * any remaining host state) before we longjump to ring-3 and possibly get
2128 * preempted.
2129 *
2130 * @param pVCpu Pointer to the VMCPU.
2131 * @param enmOperation The operation causing the ring-3 longjump.
2132 * @param pvUser The user argument (pointer to the possibly
2133 * out-of-date guest-CPU context).
2134 */
2135DECLCALLBACK(int) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
2136{
2137 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
2138 {
2139 /*
2140 * !!! IMPORTANT !!!
2141         * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
2142         * to be updated too. This is a stripped-down version which gets out ASAP, trying not to trigger any assertions.
2143 */
2144 VMMRZCallRing3RemoveNotification(pVCpu);
2145 VMMRZCallRing3Disable(pVCpu);
2146 HM_DISABLE_PREEMPT_IF_NEEDED();
2147
2148        /* Restore host FPU state if necessary and resync on next R0 reentry. */
2149 if (CPUMIsGuestFPUStateActive(pVCpu))
2150 CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
2151
2152 /* Restore host debug registers if necessary and resync on next R0 reentry. */
2153 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2154
2155 /* Deregister hook now that we've left HM context before re-enabling preemption. */
2156 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
2157 VMMR0ThreadCtxHooksDeregister(pVCpu);
2158
2159 /* Leave HM context. This takes care of local init (term). */
2160 HMR0LeaveCpu(pVCpu);
2161
2162 HM_RESTORE_PREEMPT_IF_NEEDED();
2163 return VINF_SUCCESS;
2164 }
2165
2166 Assert(pVCpu);
2167 Assert(pvUser);
2168 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2169 HMSVM_ASSERT_PREEMPT_SAFE();
2170
2171 VMMRZCallRing3Disable(pVCpu);
2172 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2173
2174 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
2175 int rc = hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
2176 AssertRCReturn(rc, rc);
2177
2178 VMMRZCallRing3Enable(pVCpu);
2179 return VINF_SUCCESS;
2180}
2181
2182
2183/**
2184 * Takes the necessary actions before going back to ring-3.
2185 *
2186 * An action requires us to go back to ring-3. This function does the necessary
2187 * steps before we can safely return to ring-3. This is a voluntary exit, not the
2188 * same as a longjmp to ring-3.
2189 *
2190 * @param pVM Pointer to the VM.
2191 * @param pVCpu Pointer to the VMCPU.
2192 * @param pCtx Pointer to the guest-CPU context.
2193 * @param rcExit The reason for exiting to ring-3. Can be
2194 * VINF_VMM_UNKNOWN_RING3_CALL.
2195 */
2196static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
2197{
2198 Assert(pVM);
2199 Assert(pVCpu);
2200 Assert(pCtx);
2201 HMSVM_ASSERT_PREEMPT_SAFE();
2202
2203    /* Please, no longjumps here (any logging shouldn't flush and thus jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
2204 VMMRZCallRing3Disable(pVCpu);
2205 Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
2206
2207 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
2208 if (pVCpu->hm.s.Event.fPending)
2209 {
2210 hmR0SvmPendingEventToTrpmTrap(pVCpu);
2211 Assert(!pVCpu->hm.s.Event.fPending);
2212 }
2213
2214 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
2215 and if we're injecting an event we should have a TRPM trap pending. */
2216 Assert(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu));
2217 Assert(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu));
2218
2219 /* Sync. the necessary state for going back to ring-3. */
2220 hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
2221 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2222
2223 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
2224 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
2225 | CPUM_CHANGED_LDTR
2226 | CPUM_CHANGED_GDTR
2227 | CPUM_CHANGED_IDTR
2228 | CPUM_CHANGED_TR
2229 | CPUM_CHANGED_HIDDEN_SEL_REGS);
2230 if ( pVM->hm.s.fNestedPaging
2231 && CPUMIsGuestPagingEnabledEx(pCtx))
2232 {
2233 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2234 }
2235
2236 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
2237 if (rcExit != VINF_EM_RAW_INTERRUPT)
2238 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
2239
2240 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
2241
2242 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
2243 VMMRZCallRing3RemoveNotification(pVCpu);
2244 VMMRZCallRing3Enable(pVCpu);
2245}
2246
2247
2248/**
2249 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
2250 * intercepts.
2251 *
2252 * @param pVCpu Pointer to the VMCPU.
2253 *
2254 * @remarks No-long-jump zone!!!
2255 */
2256static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
2257{
2258 bool fParavirtTsc = false;
2259 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2260 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc))
2261 {
2262 uint64_t u64CurTSC = ASMReadTSC();
2263 uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu);
2264 if (fParavirtTsc)
2265 {
2266#if 0
2267 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > u64LastTick)
2268 {
2269 pVmcb->ctrl.u64TSCOffset = u64LastTick - u64CurTSC;
2270 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
2271 }
2272 int rc = GIMR0UpdateParavirtTsc(pVCpu->CTX_SUFF(pVM), pVmcb->ctrl.u64TSCOffset);
2273 AssertRC(rc);
2274#endif
2275 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
2276 }
2277
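        /* The decision below, in short: on an unintercepted RDTSC the guest observes
           (host TSC + u64TSCOffset), so we only leave RDTSC/RDTSCP unintercepted when that value cannot
           appear to have gone backwards relative to the last tick TM has seen; otherwise we intercept
           and virtualize the TSC reads. */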
2278 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
2279 {
2280 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
2281 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
2282 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2283 }
2284 else
2285 {
2286 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
2287 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
2288 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
2289 }
2290 }
2291 else
2292 {
2293 Assert(!fParavirtTsc);
2294 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
2295 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
2296 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2297 }
2298
2299 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2300}
2301
2302
2303/**
2304 * Sets an event as a pending event to be injected into the guest.
2305 *
2306 * @param pVCpu Pointer to the VMCPU.
2307 * @param pEvent Pointer to the SVM event.
2308 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
2309 * page-fault.
2310 *
2311 * @remarks Statistics counter assumes this is a guest event being reflected to
2312 *          the guest, i.e. 'StatInjectPendingReflect' is always incremented.
2313 */
2314DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
2315{
2316 Assert(!pVCpu->hm.s.Event.fPending);
2317 Assert(pEvent->n.u1Valid);
2318
2319 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
2320 pVCpu->hm.s.Event.fPending = true;
2321 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
2322
2323 Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2324 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2325
2326 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
2327}
2328
2329
2330/**
2331 * Injects an event into the guest upon VMRUN by updating the relevant field
2332 * in the VMCB.
2333 *
2334 * @param pVCpu Pointer to the VMCPU.
2335 * @param pVmcb Pointer to the guest VM control block.
2336 * @param pCtx Pointer to the guest-CPU context.
2337 * @param pEvent Pointer to the event.
2338 *
2339 * @remarks No-long-jump zone!!!
2340 * @remarks Requires CR0!
2341 */
2342DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
2343{
2344 NOREF(pVCpu); NOREF(pCtx);
2345
2346 pVmcb->ctrl.EventInject.u = pEvent->u;
2347 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
2348
2349 Log4(("hmR0SvmInjectEventVmcb: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2350 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2351}
2352
2353
2354
2355/**
2356 * Converts any TRPM trap into a pending HM event. This is typically used when
2357 * entering from ring-3 (not longjmp returns).
2358 *
2359 * @param pVCpu Pointer to the VMCPU.
2360 */
2361static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
2362{
2363 Assert(TRPMHasTrap(pVCpu));
2364 Assert(!pVCpu->hm.s.Event.fPending);
2365
2366 uint8_t uVector;
2367 TRPMEVENT enmTrpmEvent;
2368 RTGCUINT uErrCode;
2369 RTGCUINTPTR GCPtrFaultAddress;
2370 uint8_t cbInstr;
2371
2372 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
2373 AssertRC(rc);
2374
2375 SVMEVENT Event;
2376 Event.u = 0;
2377 Event.n.u1Valid = 1;
2378 Event.n.u8Vector = uVector;
2379
2380 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
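    /* A rough summary of that format as reflected by the SVMEVENT fields used below (see the AMD spec
       for the authoritative definition):
         bits  7:0   u8Vector         - interrupt/exception vector
         bits 10:8   u3Type           - 0=external IRQ, 2=NMI, 3=exception, 4=software interrupt
         bit     11  u1ErrorCodeValid - whether u32ErrorCode is delivered with the event
         bit     31  u1Valid          - event is valid/pending
         bits 63:32  u32ErrorCode     - the error code, if any. */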
2381 if (enmTrpmEvent == TRPM_TRAP)
2382 {
2383 Event.n.u3Type = SVM_EVENT_EXCEPTION;
2384 switch (uVector)
2385 {
2386 case X86_XCPT_NMI:
2387 {
2388 Event.n.u3Type = SVM_EVENT_NMI;
2389 break;
2390 }
2391
2392 case X86_XCPT_PF:
2393 case X86_XCPT_DF:
2394 case X86_XCPT_TS:
2395 case X86_XCPT_NP:
2396 case X86_XCPT_SS:
2397 case X86_XCPT_GP:
2398 case X86_XCPT_AC:
2399 {
2400 Event.n.u1ErrorCodeValid = 1;
2401 Event.n.u32ErrorCode = uErrCode;
2402 break;
2403 }
2404 }
2405 }
2406 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
2407 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2408 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
2409 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
2410 else
2411 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
2412
2413 rc = TRPMResetTrap(pVCpu);
2414 AssertRC(rc);
2415
2416 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
2417 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
2418
2419 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
2420 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
2421}
2422
2423
2424/**
2425 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
2426 * AMD-V to execute any instruction.
2427 *
2428 * @param   pVCpu       Pointer to the VMCPU.
2429 */
2430static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
2431{
2432 Assert(pVCpu->hm.s.Event.fPending);
2433 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
2434
2435 SVMEVENT Event;
2436 Event.u = pVCpu->hm.s.Event.u64IntInfo;
2437
2438 uint8_t uVector = Event.n.u8Vector;
2439 uint8_t uVectorType = Event.n.u3Type;
2440
2441 TRPMEVENT enmTrapType;
2442 switch (uVectorType)
2443 {
2444 case SVM_EVENT_EXTERNAL_IRQ:
2445 enmTrapType = TRPM_HARDWARE_INT;
2446 break;
2447 case SVM_EVENT_SOFTWARE_INT:
2448 enmTrapType = TRPM_SOFTWARE_INT;
2449 break;
2450 case SVM_EVENT_EXCEPTION:
2451 case SVM_EVENT_NMI:
2452 enmTrapType = TRPM_TRAP;
2453 break;
2454 default:
2455 AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
2456 enmTrapType = TRPM_32BIT_HACK;
2457 break;
2458 }
2459
2460    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
2461
2462 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
2463 AssertRC(rc);
2464
2465 if (Event.n.u1ErrorCodeValid)
2466 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
2467
2468 if ( uVectorType == SVM_EVENT_EXCEPTION
2469 && uVector == X86_XCPT_PF)
2470 {
2471 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
2472 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
2473 }
2474 else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
2475 {
2476 AssertMsg( uVectorType == SVM_EVENT_SOFTWARE_INT
2477 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
2478 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
2479 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
2480 }
2481 pVCpu->hm.s.Event.fPending = false;
2482}
2483
2484
2485/**
2486 * Gets the guest's interrupt-shadow.
2487 *
2488 * @returns The guest's interrupt-shadow.
2489 * @param pVCpu Pointer to the VMCPU.
2490 * @param pCtx Pointer to the guest-CPU context.
2491 *
2492 * @remarks No-long-jump zone!!!
2493 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2494 */
2495DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
2496{
2497 /*
2498 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2499 * inhibit interrupts or clear any existing interrupt-inhibition.
2500 */
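    /* Example: for a guest executing "STI; HLT" the HLT must be reached before any interrupt is taken.
       While RIP still equals the inhibit-PC recorded by EM we therefore report the shadow as active;
       once RIP has moved past it the inhibition has expired and the force-flag is cleared below. */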
2501 uint32_t uIntrState = 0;
2502 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2503 {
2504 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2505 {
2506 /*
2507             * We can clear the inhibit force-flag here: even if we go back to the recompiler without executing guest
2508             * code in AMD-V, the condition for clearing the flag has been met, so the cleared state is correct.
2509 */
2510 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2511 }
2512 else
2513 uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
2514 }
2515 return uIntrState;
2516}
2517
2518
2519/**
2520 * Sets the virtual interrupt intercept control in the VMCB which
2521 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
2522 * receive interrupts.
2523 *
2524 * @param pVmcb Pointer to the VM control block.
2525 */
2526DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
2527{
2528 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
2529 {
2530 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1; /* A virtual interrupt is pending. */
2531 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
2532 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
2533 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
2534
2535 Log4(("Setting VINTR intercept\n"));
2536 }
2537}
2538
2539
2540/**
2541 * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
2542 * #VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
2543 * virtual NMIs.
2544 *
2545 * @param pVmcb Pointer to the VM control block.
2546 */
2547DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
2548{
2549 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
2550 {
2551 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
2552 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
2553
2554 Log4(("Setting IRET intercept\n"));
2555 }
2556}
2557
2558
2559/**
2560 * Clears the IRET intercept control in the VMCB.
2561 *
2562 * @param pVmcb Pointer to the VM control block.
2563 */
2564DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
2565{
2566 if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
2567 {
2568 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
2569 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
2570
2571 Log4(("Clearing IRET intercept\n"));
2572 }
2573}
2574
2575
2576/**
2577 * Evaluates the event to be delivered to the guest and sets it as the pending
2578 * event.
2579 *
2580 * @param pVCpu Pointer to the VMCPU.
2581 * @param pCtx Pointer to the guest-CPU context.
2582 */
2583static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2584{
2585 Assert(!pVCpu->hm.s.Event.fPending);
2586 Log4Func(("\n"));
2587
2588 bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
2589 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
2590 bool const fBlockNmi = RT_BOOL(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
2591 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2592
2593 SVMEVENT Event;
2594 Event.u = 0;
2595 /** @todo SMI. SMIs take priority over NMIs. */
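    /* Delivery order implemented below: NMIs first (deferred via the IRET/VINTR intercepts while blocked
       by a previous NMI or by an interrupt shadow), then external PIC/APIC interrupts (deferred via the
       VINTR intercept while EFLAGS.IF is clear or an interrupt shadow is active). */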
2596    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))        /* NMI. NMIs take priority over regular interrupts. */
2597 {
2598 if (fBlockNmi)
2599 hmR0SvmSetIretIntercept(pVmcb);
2600 else if (fIntShadow)
2601 hmR0SvmSetVirtIntrIntercept(pVmcb);
2602 else
2603 {
2604 Log4(("Pending NMI\n"));
2605
2606 Event.n.u1Valid = 1;
2607 Event.n.u8Vector = X86_XCPT_NMI;
2608 Event.n.u3Type = SVM_EVENT_NMI;
2609
2610 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2611 hmR0SvmSetIretIntercept(pVmcb);
2612 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2613 }
2614 }
2615 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
2616 {
2617 /*
2618         * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
2619         * the interrupt ASAP, and we must not execute any guest code until we inject it. That is why the interrupt is
2620         * evaluated right here rather than being set as pending based solely on the force-flags.
2621 */
2622 if ( !fBlockInt
2623 && !fIntShadow)
2624 {
2625 uint8_t u8Interrupt;
2626 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
2627 if (RT_SUCCESS(rc))
2628 {
2629 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
2630
2631 Event.n.u1Valid = 1;
2632 Event.n.u8Vector = u8Interrupt;
2633 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2634
2635 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2636 }
2637 else
2638 {
2639 /** @todo Does this actually happen? If not turn it into an assertion. */
2640 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
2641 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
2642 }
2643 }
2644 else
2645 hmR0SvmSetVirtIntrIntercept(pVmcb);
2646 }
2647}
2648
2649
2650/**
2651 * Injects any pending events into the guest if the guest is in a state to
2652 * receive them.
2653 *
2654 * @param pVCpu Pointer to the VMCPU.
2655 * @param pCtx Pointer to the guest-CPU context.
2656 */
2657static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2658{
2659 Assert(!TRPMHasTrap(pVCpu));
2660 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2661 Log4Func(("\n"));
2662
2663 bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
2664 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
2665 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2666
2667 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
2668 {
2669 SVMEVENT Event;
2670 Event.u = pVCpu->hm.s.Event.u64IntInfo;
2671 Assert(Event.n.u1Valid);
2672#ifdef VBOX_STRICT
2673 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2674 {
2675 Assert(!fBlockInt);
2676 Assert(!fIntShadow);
2677 }
2678 else if (Event.n.u3Type == SVM_EVENT_NMI)
2679 Assert(!fIntShadow);
2680#endif
2681
2682 Log4(("Injecting pending HM event.\n"));
2683 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
2684 pVCpu->hm.s.Event.fPending = false;
2685
2686#ifdef VBOX_WITH_STATISTICS
2687 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2688 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
2689 else
2690 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
2691#endif
2692 }
2693
2694 /* Update the guest interrupt shadow in the VMCB. */
2695 pVmcb->ctrl.u64IntShadow = !!fIntShadow;
2696 NOREF(fBlockInt);
2697}
2698
2699
2700/**
2701 * Reports world-switch error and dumps some useful debug info.
2702 *
2703 * @param pVM Pointer to the VM.
2704 * @param pVCpu Pointer to the VMCPU.
2705 * @param rcVMRun The return code from VMRUN (or
2706 * VERR_SVM_INVALID_GUEST_STATE for invalid
2707 * guest-state).
2708 * @param pCtx Pointer to the guest-CPU context.
2709 */
2710static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
2711{
2712 NOREF(pCtx);
2713 HMSVM_ASSERT_PREEMPT_SAFE();
2714 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2715
2716 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
2717 {
2718 HMDumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
2719#ifdef VBOX_STRICT
2720 Log4(("ctrl.u64VmcbCleanBits %#RX64\n", pVmcb->ctrl.u64VmcbCleanBits));
2721 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
2722 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
2723 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
2724 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
2725 Log4(("ctrl.u32InterceptException %#x\n", pVmcb->ctrl.u32InterceptException));
2726 Log4(("ctrl.u32InterceptCtrl1 %#x\n", pVmcb->ctrl.u32InterceptCtrl1));
2727 Log4(("ctrl.u32InterceptCtrl2 %#x\n", pVmcb->ctrl.u32InterceptCtrl2));
2728 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
2729 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
2730 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
2731
2732 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
2733 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
2734 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
2735
2736 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
2737 Log4(("ctrl.IntCtrl.u1VIrqValid %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqValid));
2738 Log4(("ctrl.IntCtrl.u7Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved));
2739 Log4(("ctrl.IntCtrl.u4VIrqPriority %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIrqPriority));
2740 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
2741 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
2742 Log4(("ctrl.IntCtrl.u1VIrqMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
2743 Log4(("ctrl.IntCtrl.u6Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
2744 Log4(("ctrl.IntCtrl.u8VIrqVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
2745 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
2746
2747 Log4(("ctrl.u64IntShadow %#RX64\n", pVmcb->ctrl.u64IntShadow));
2748 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
2749 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
2750 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
2751 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
2752 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
2753 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
2754 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
2755 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
2756 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
2757 Log4(("ctrl.NestedPaging %#RX64\n", pVmcb->ctrl.NestedPaging.u));
2758 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
2759 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
2760 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
2761 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
2762 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
2763 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
2764
2765 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
2766 Log4(("ctrl.u64LBRVirt %#RX64\n", pVmcb->ctrl.u64LBRVirt));
2767
2768 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
2769 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
2770 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
2771 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
2772 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
2773 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
2774 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
2775 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
2776 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
2777 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
2778 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
2779 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
2780 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
2781 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
2782 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
2783 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
2784 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
2785 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
2786 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
2787 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
2788
2789 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
2790 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
2791
2792 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
2793 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
2794 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
2795 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
2796
2797 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
2798 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
2799
2800 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
2801 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
2802 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
2803 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
2804
2805 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
2806 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
2807 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
2808 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
2809 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
2810 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
2811 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
2812
2813 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
2814 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
2815 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
2816 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
2817
2818 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
2819 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
2820 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
2821
2822 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
2823 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
2824 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
2825 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
2826 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
2827 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
2828 Log4(("guest.u64GPAT %#RX64\n", pVmcb->guest.u64GPAT));
2829 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
2830 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
2831 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
2832 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
2833 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
2834#else
2835 NOREF(pVmcb);
2836#endif /* VBOX_STRICT */
2837 }
2838 else
2839 Log4(("hmR0SvmReportWorldSwitchError: rcVMRun=%d\n", rcVMRun));
2840}
2841
2842
2843/**
2844 * Checks per-VM and per-VCPU force-flag actions that require us to go back to
2845 * ring-3 for one reason or another.
2846 *
2847 * @returns VBox status code (informational status codes included).
2848 * @retval VINF_SUCCESS if we don't have any actions that require going back to
2849 * ring-3.
2850 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
2851 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
2852 *         interrupts).
2853 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
2854 * all EMTs to be in ring-3.
2855 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
2856 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
2857 * to the EM loop.
2858 *
2859 * @param pVM Pointer to the VM.
2860 * @param pVCpu Pointer to the VMCPU.
2861 * @param pCtx Pointer to the guest-CPU context.
2862 */
2863static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2864{
2865 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2866
2867    /* On AMD-V we don't need to lazily update CR3 or the PAE PDPEs. See hmR0SvmSaveGuestState(). */
2868 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
2869 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
2870
2871 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
2872 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2873 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
2874 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2875 {
2876        /* Pending PGM CR3 sync. */
2877        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2878 {
2879 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2880 if (rc != VINF_SUCCESS)
2881 {
2882 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
2883 return rc;
2884 }
2885 }
2886
2887 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
2888 /* -XXX- what was that about single stepping? */
2889 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
2890 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2891 {
2892 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
2893 int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
2894 Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
2895 return rc;
2896 }
2897
2898 /* Pending VM request packets, such as hardware interrupts. */
2899 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
2900 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2901 {
2902 Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
2903 return VINF_EM_PENDING_REQUEST;
2904 }
2905
2906 /* Pending PGM pool flushes. */
2907 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
2908 {
2909 Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
2910 return VINF_PGM_POOL_FLUSH_PENDING;
2911 }
2912
2913 /* Pending DMA requests. */
2914 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
2915 {
2916 Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
2917 return VINF_EM_RAW_TO_R3;
2918 }
2919 }
2920
2921 return VINF_SUCCESS;
2922}
2923
2924
2925/**
2926 * Does the preparations before executing guest code in AMD-V.
2927 *
2928 * This may cause longjmps to ring-3 and may even result in rescheduling to the
2929 * recompiler. We must be cautious about what we do here regarding committing
2930 * guest-state information into the VMCB, assuming we will actually execute the
2931 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
2932 * clearing the common-state (TRPM/forceflags), we must undo those changes so
2933 * that the recompiler can (and should) use them when it resumes guest
2934 * execution. Otherwise such operations must be done when we can no longer
2935 * exit to ring-3.
2936 *
2937 * @returns VBox status code (informational status codes included).
2938 * @retval VINF_SUCCESS if we can proceed with running the guest.
2939 * @retval VINF_* scheduling changes, we have to go back to ring-3.
2940 *
2941 * @param pVM Pointer to the VM.
2942 * @param pVCpu Pointer to the VMCPU.
2943 * @param pCtx Pointer to the guest-CPU context.
2944 * @param pSvmTransient Pointer to the SVM transient structure.
2945 */
2946static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
2947{
2948 HMSVM_ASSERT_PREEMPT_SAFE();
2949
2950 /* Check force flag actions that might require us to go back to ring-3. */
2951 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
2952 if (rc != VINF_SUCCESS)
2953 return rc;
2954
2955 if (TRPMHasTrap(pVCpu))
2956 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
2957 else if (!pVCpu->hm.s.Event.fPending)
2958 hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
2959
2960#ifdef HMSVM_SYNC_FULL_GUEST_STATE
2961 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
2962#endif
2963
2964 /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
2965 rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
2966 AssertRCReturn(rc, rc);
2967 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
2968
2969 /*
2970 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
2971 * so we can update it on the way back if the guest changed the TPR.
2972 */
2973 if (pVCpu->hm.s.svm.fSyncVTpr)
2974 {
2975 if (pVM->hm.s.fTPRPatchingActive)
2976 pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
2977 else
2978 {
2979 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2980 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
2981 }
2982 }
2983
2984 /*
2985 * No longjmps to ring-3 from this point on!!!
2986 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
2987 * This also disables flushing of the R0-logger instance (if any).
2988 */
2989 VMMRZCallRing3Disable(pVCpu);
2990
2991 /*
2992 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
2993 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
2994 *
2995     * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
2996 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
2997 *
2998 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
2999 * executing guest code.
3000 */
3001 pSvmTransient->uEflags = ASMIntDisableFlags();
3002 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
3003 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3004 {
3005 ASMSetFlags(pSvmTransient->uEflags);
3006 VMMRZCallRing3Enable(pVCpu);
3007 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3008 return VINF_EM_RAW_TO_R3;
3009 }
3010 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
3011 {
3012 ASMSetFlags(pSvmTransient->uEflags);
3013 VMMRZCallRing3Enable(pVCpu);
3014 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
3015 return VINF_EM_RAW_INTERRUPT;
3016 }
3017
3018 /*
3019 * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
3020 * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
3021 * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
3022 *
3023     * With VT-x, this is handled by the guest interruptibility-state VMCS field, which the CPU updates
3024     * after actually delivering the NMI and which we read on VM-exit to determine the state.
3025 */
3026 if (pVCpu->hm.s.Event.fPending)
3027 {
3028 SVMEVENT Event;
3029 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3030 if ( Event.n.u1Valid
3031 && Event.n.u3Type == SVM_EVENT_NMI
3032 && Event.n.u8Vector == X86_XCPT_NMI
3033 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3034 {
3035 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3036 }
3037 }
3038
3039 return VINF_SUCCESS;
3040}
3041
3042
3043/**
3044 * Prepares to run guest code in AMD-V once we have committed to doing so. This
3045 * means there is no backing out to ring-3 or anywhere else at this
3046 * point.
3047 *
3048 * @param pVM Pointer to the VM.
3049 * @param pVCpu Pointer to the VMCPU.
3050 * @param pCtx Pointer to the guest-CPU context.
3051 * @param pSvmTransient Pointer to the SVM transient structure.
3052 *
3053 * @remarks Called with preemption disabled.
3054 * @remarks No-long-jump zone!!!
3055 */
3056static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3057{
3058 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3059 Assert(VMMR0IsLogFlushDisabled(pVCpu));
3060 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3061
3062 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
3063 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
3064
3065 hmR0SvmInjectPendingEvent(pVCpu, pCtx);
3066
3067 if ( pVCpu->hm.s.fUseGuestFpu
3068 && !CPUMIsGuestFPUStateActive(pVCpu))
3069 {
3070 CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
3071 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
3072 }
3073
3074 /* Load the state shared between host and guest (FPU, debug). */
3075 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3076 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
3077 hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
3078 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); /* Preemption might set this, nothing to do on AMD-V. */
3079 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
3080
3081 /* Setup TSC offsetting. */
3082 RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
3083 if ( pSvmTransient->fUpdateTscOffsetting
3084 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
3085 {
3086 hmR0SvmUpdateTscOffsetting(pVCpu);
3087 pSvmTransient->fUpdateTscOffsetting = false;
3088 }
3089
3090    /* If we've migrated to a different CPU, mark the VMCB Clean bits as dirty. */
3091 if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
3092 pVmcb->ctrl.u64VmcbCleanBits = 0;
3093
3094 /* Store status of the shared guest-host state at the time of VMRUN. */
3095#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3096 if (CPUMIsGuestInLongModeEx(pCtx))
3097 {
3098 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
3099 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
3100 }
3101 else
3102#endif
3103 {
3104 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
3105 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
3106 }
3107 pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
3108
3109 /* Flush the appropriate tagged-TLB entries. */
3110 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
3111 hmR0SvmFlushTaggedTlb(pVCpu);
3112 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
3113
3114 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
3115
3116 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
3117 to start executing. */
3118
3119 /*
3120 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
3121     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
3122 *
3123 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
3124 */
3125 if ( (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
3126 && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
3127 {
3128 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
3129 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
3130 uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
3131 if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
3132 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
3133 pSvmTransient->fRestoreTscAuxMsr = true;
3134 }
3135 else
3136 {
3137 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
3138 pSvmTransient->fRestoreTscAuxMsr = false;
3139 }
3140
3141    /* If VMCB Clean bits aren't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
3142 if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
3143 pVmcb->ctrl.u64VmcbCleanBits = 0;
3144}
3145
3146
3147/**
3148 * Wrapper for running the guest code in AMD-V.
3149 *
3150 * @returns VBox strict status code.
3151 * @param pVM Pointer to the VM.
3152 * @param pVCpu Pointer to the VMCPU.
3153 * @param pCtx Pointer to the guest-CPU context.
3154 *
3155 * @remarks No-long-jump zone!!!
3156 */
3157DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3158{
3159 /*
3160 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3161 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
3162     * Refer to the MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
3163 */
3164#ifdef VBOX_WITH_KERNEL_USING_XMM
3165 return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
3166 pVCpu->hm.s.svm.pfnVMRun);
3167#else
3168 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
3169#endif
3170}
3171
3172
3173/**
3174 * Performs some essential restoration of state after running guest code in
3175 * AMD-V.
3176 *
3177 * @param pVM Pointer to the VM.
3178 * @param pVCpu Pointer to the VMCPU.
3179 * @param pMixedCtx      Pointer to the guest-CPU context. The data may be
3180 * out-of-sync. Make sure to update the required fields
3181 * before using them.
3182 * @param pSvmTransient Pointer to the SVM transient structure.
3183 * @param rcVMRun Return code of VMRUN.
3184 *
3185 * @remarks Called with interrupts disabled.
3186 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
3187 * unconditionally when it is safe to do so.
3188 */
3189static void hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
3190{
3191 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3192
3193 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
3194 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
3195
3196 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3197 pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
3198
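        /* If we made TSC_AUX pass-through before VMRUN (see hmR0SvmPreRunGuestCommitted), the guest may have
           modified it with WRMSR, so remember the guest value for CPUM and restore the host's TSC_AUX. */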
3199 if (pSvmTransient->fRestoreTscAuxMsr)
3200 {
3201 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
3202 CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
3203 if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
3204 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
3205 }
3206
3207 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
3208 {
3209 /** @todo Find a way to fix hardcoding a guestimate. */
3210 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset - 0x400);
3211 }
3212
3213 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
3214 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
3215 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
3216
3217 Assert(!(ASMGetFlags() & X86_EFL_IF));
3218 ASMSetFlags(pSvmTransient->uEflags); /* Enable interrupts. */
3219 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
3220
3221 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
3222 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
3223 {
3224 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
3225 return;
3226 }
3227
3228 pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode; /* Save the #VMEXIT reason. */
3229 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcb->ctrl.u64ExitCode); /* Update the #VMEXIT history array. */
3230 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
3231 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
3232
3233 hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
3234
3235 if (RT_LIKELY(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID))
3236 {
3237 if (pVCpu->hm.s.svm.fSyncVTpr)
3238 {
3239 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
3240 if ( pVM->hm.s.fTPRPatchingActive
3241 && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
3242 {
3243 int rc = PDMApicSetTPR(pVCpu, pMixedCtx->msrLSTAR & 0xff);
3244 AssertRC(rc);
3245 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3246 }
3247 else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
3248 {
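                /* The VMCB V_TPR field holds only the 4-bit CR8 priority, hence the shift by 4 below to form
                   the 8-bit TPR value that PDMApicSetTPR() takes. */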
3249 int rc = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
3250 AssertRC(rc);
3251 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3252 }
3253 }
3254 }
3255}
3256
3257
3258/**
3259 * Runs the guest code using AMD-V.
3260 *
3261 * @returns VBox status code.
3262 * @param pVM Pointer to the VM.
3263 * @param pVCpu Pointer to the VMCPU.
3264 */
3265static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3266{
3267 SVMTRANSIENT SvmTransient;
3268 SvmTransient.fUpdateTscOffsetting = true;
3269 uint32_t cLoops = 0;
3270 int rc = VERR_INTERNAL_ERROR_5;
3271
3272 for (;; cLoops++)
3273 {
3274 Assert(!HMR0SuspendPending());
3275 HMSVM_ASSERT_CPU_SAFE();
3276
3277        /* Preparatory work for running guest code; this may force us to return
3278 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
3279 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
3280 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
3281 if (rc != VINF_SUCCESS)
3282 break;
3283
3284 /*
3285 * No longjmps to ring-3 from this point on!!!
3286 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
3287 * This also disables flushing of the R0-logger instance (if any).
3288 */
3289 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
3290 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
3291
3292 /* Restore any residual host-state and save any bits shared between host
3293 and guest into the guest-CPU state. Re-enables interrupts! */
3294 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
3295
3296 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
3297 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3298 {
3299 if (rc == VINF_SUCCESS)
3300 rc = VERR_SVM_INVALID_GUEST_STATE;
3301 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
3302 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
3303 break;
3304 }
3305
3306 /* Handle the #VMEXIT. */
3307 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
3308 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3309 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
3310 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
3311 if (rc != VINF_SUCCESS)
3312 break;
3313 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
3314 {
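            /* Force a trip back to ring-3 after too many consecutive VMRUNs so pending ring-3 work (timers,
               requests) gets a chance to run; we report it like a host interrupt. */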
3315 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
3316 rc = VINF_EM_RAW_INTERRUPT;
3317 break;
3318 }
3319 }
3320
3321 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
3322 return rc;
3323}
3324
3325
3326/**
3327 * Runs the guest code using AMD-V in single step mode.
3328 *
3329 * @returns VBox status code.
3330 * @param pVM Pointer to the VM.
3331 * @param pVCpu Pointer to the VMCPU.
3332 * @param pCtx Pointer to the guest-CPU context.
3333 */
3334static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3335{
3336 SVMTRANSIENT SvmTransient;
3337 SvmTransient.fUpdateTscOffsetting = true;
3338 uint32_t cLoops = 0;
3339 int rc = VERR_INTERNAL_ERROR_5;
3340 uint16_t uCsStart = pCtx->cs.Sel;
3341 uint64_t uRipStart = pCtx->rip;
3342
3343 for (;; cLoops++)
3344 {
3345 Assert(!HMR0SuspendPending());
3346 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
3347 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
3348 (unsigned)RTMpCpuId(), cLoops));
3349
3350        /* Preparatory work for running guest code; this may force us to return
3351 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
3352 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
3353 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
3354 if (rc != VINF_SUCCESS)
3355 break;
3356
3357 /*
3358 * No longjmps to ring-3 from this point on!!!
3359 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
3360 * This also disables flushing of the R0-logger instance (if any).
3361 */
3362 VMMRZCallRing3Disable(pVCpu);
3363 VMMRZCallRing3RemoveNotification(pVCpu);
3364 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
3365
3366 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
3367
3368 /*
3369 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
3370 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
3371 */
3372 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
3373 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
3374 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3375 {
3376 if (rc == VINF_SUCCESS)
3377 rc = VERR_SVM_INVALID_GUEST_STATE;
3378 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
3379 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
3380 return rc;
3381 }
3382
3383 /* Handle the #VMEXIT. */
3384 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
3385 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3386 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
3387 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
3388 if (rc != VINF_SUCCESS)
3389 break;
3390 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
3391 {
3392 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
3393 rc = VINF_EM_RAW_INTERRUPT;
3394 break;
3395 }
3396
3397 /*
3398         * Did the RIP change? If so, consider it a single step.
3399 * Otherwise, make sure one of the TFs gets set.
3400 */
3401 if ( pCtx->rip != uRipStart
3402 || pCtx->cs.Sel != uCsStart)
3403 {
3404 rc = VINF_EM_DBG_STEPPED;
3405 break;
3406 }
3407 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
3408 }
3409
3410 /*
3411 * Clear the X86_EFL_TF if necessary.
3412 */
3413 if (pVCpu->hm.s.fClearTrapFlag)
3414 {
3415 pVCpu->hm.s.fClearTrapFlag = false;
3416 pCtx->eflags.Bits.u1TF = 0;
3417 }
3418
3419 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
3420 return rc;
3421}
3422
3423
3424/**
3425 * Runs the guest code using AMD-V.
3426 *
3427 * @returns VBox status code.
3428 * @param pVM Pointer to the VM.
3429 * @param pVCpu Pointer to the VMCPU.
3430 * @param pCtx Pointer to the guest-CPU context.
3431 */
3432VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3433{
3434 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3435 HMSVM_ASSERT_PREEMPT_SAFE();
3436 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
3437
3438 int rc;
3439 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
3440 rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx);
3441 else
3442 rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx);
3443
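    /* Massage internal status codes into ones the rest of EM expects: emulate the instruction in ring-3 instead
       of failing outright, and report a reset (e.g. a shutdown #VMEXIT) as a triple fault. */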
3444 if (rc == VERR_EM_INTERPRETER)
3445 rc = VINF_EM_RAW_EMULATE_INSTR;
3446 else if (rc == VINF_EM_RESET)
3447 rc = VINF_EM_TRIPLE_FAULT;
3448
3449 /* Prepare to return to ring-3. This will remove longjmp notifications. */
3450 hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
3451 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
3452 return rc;
3453}
3454
3455
3456/**
3457 * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
3458 *
3459 * @returns VBox status code (informational status codes included).
3460 * @param pVCpu Pointer to the VMCPU.
3461 * @param pCtx Pointer to the guest-CPU context.
3462 * @param pSvmTransient Pointer to the SVM transient structure.
3463 */
3464DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3465{
3466 Assert(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID);
3467 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
3468
3469 /*
3470 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
3471 * normal workloads (for some definition of "normal").
3472 */
3473 uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
3474 switch (pSvmTransient->u64ExitCode)
3475 {
3476 case SVM_EXIT_NPF:
3477 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
3478
3479 case SVM_EXIT_IOIO:
3480 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
3481
3482 case SVM_EXIT_RDTSC:
3483 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
3484
3485 case SVM_EXIT_RDTSCP:
3486 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
3487
3488 case SVM_EXIT_CPUID:
3489 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
3490
3491 case SVM_EXIT_EXCEPTION_E: /* X86_XCPT_PF */
3492 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
3493
3494 case SVM_EXIT_EXCEPTION_7: /* X86_XCPT_NM */
3495 return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
3496
3497 case SVM_EXIT_EXCEPTION_10: /* X86_XCPT_MF */
3498 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
3499
3500 case SVM_EXIT_EXCEPTION_1: /* X86_XCPT_DB */
3501 return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
3502
3503 case SVM_EXIT_MONITOR:
3504 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
3505
3506 case SVM_EXIT_MWAIT:
3507 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
3508
3509 case SVM_EXIT_HLT:
3510 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
3511
3512 case SVM_EXIT_READ_CR0:
3513 case SVM_EXIT_READ_CR3:
3514 case SVM_EXIT_READ_CR4:
3515 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
3516
3517 case SVM_EXIT_WRITE_CR0:
3518 case SVM_EXIT_WRITE_CR3:
3519 case SVM_EXIT_WRITE_CR4:
3520 case SVM_EXIT_WRITE_CR8:
3521 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
3522
3523 case SVM_EXIT_VINTR:
3524 return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
3525
3526 case SVM_EXIT_INTR:
3527 case SVM_EXIT_FERR_FREEZE:
3528 case SVM_EXIT_NMI:
3529 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
3530
3531 case SVM_EXIT_MSR:
3532 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
3533
3534 case SVM_EXIT_INVLPG:
3535 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
3536
3537 case SVM_EXIT_WBINVD:
3538 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
3539
3540 case SVM_EXIT_INVD:
3541 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
3542
3543 case SVM_EXIT_RDPMC:
3544 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
3545
3546 default:
3547 {
3548 switch (pSvmTransient->u64ExitCode)
3549 {
3550 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
3551 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
3552 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
3553 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
3554 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
3555
3556 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
3557 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
3558 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
3559 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
3560 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
3561
3562 case SVM_EXIT_TASK_SWITCH:
3563 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
3564
3565 case SVM_EXIT_VMMCALL:
3566 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
3567
3568 case SVM_EXIT_IRET:
3569 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
3570
3571 case SVM_EXIT_SHUTDOWN:
3572 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
3573
3574 case SVM_EXIT_SMI:
3575 case SVM_EXIT_INIT:
3576 {
3577 /*
3578                 * We don't intercept SMIs. As for INIT signals, they really shouldn't ever happen here. If one ever does,
3579 * we want to know about it so log the exit code and bail.
3580 */
3581 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
3582 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
3583 return VERR_SVM_UNEXPECTED_EXIT;
3584 }
3585
3586 case SVM_EXIT_INVLPGA:
3587 case SVM_EXIT_RSM:
3588 case SVM_EXIT_VMRUN:
3589 case SVM_EXIT_VMLOAD:
3590 case SVM_EXIT_VMSAVE:
3591 case SVM_EXIT_STGI:
3592 case SVM_EXIT_CLGI:
3593 case SVM_EXIT_SKINIT:
3594 return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
3595
3596#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
3597 case SVM_EXIT_EXCEPTION_0: /* X86_XCPT_DE */
3598 /* SVM_EXIT_EXCEPTION_1: */ /* X86_XCPT_DB - Handled above. */
3599 case SVM_EXIT_EXCEPTION_2: /* X86_XCPT_NMI */
3600 case SVM_EXIT_EXCEPTION_3: /* X86_XCPT_BP */
3601 case SVM_EXIT_EXCEPTION_4: /* X86_XCPT_OF */
3602 case SVM_EXIT_EXCEPTION_5: /* X86_XCPT_BR */
3603 case SVM_EXIT_EXCEPTION_6: /* X86_XCPT_UD */
3604 /* SVM_EXIT_EXCEPTION_7: */ /* X86_XCPT_NM - Handled above. */
3605 case SVM_EXIT_EXCEPTION_8: /* X86_XCPT_DF */
3606 case SVM_EXIT_EXCEPTION_9: /* X86_XCPT_CO_SEG_OVERRUN */
3607 case SVM_EXIT_EXCEPTION_A: /* X86_XCPT_TS */
3608 case SVM_EXIT_EXCEPTION_B: /* X86_XCPT_NP */
3609 case SVM_EXIT_EXCEPTION_C: /* X86_XCPT_SS */
3610 case SVM_EXIT_EXCEPTION_D: /* X86_XCPT_GP */
3611 /* SVM_EXIT_EXCEPTION_E: */ /* X86_XCPT_PF - Handled above. */
3612 /* SVM_EXIT_EXCEPTION_10: */ /* X86_XCPT_MF - Handled above. */
3613 case SVM_EXIT_EXCEPTION_11: /* X86_XCPT_AC */
3614 case SVM_EXIT_EXCEPTION_12: /* X86_XCPT_MC */
3615 case SVM_EXIT_EXCEPTION_13: /* X86_XCPT_XF */
3616 case SVM_EXIT_EXCEPTION_F: /* Reserved */
3617 case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16:
3618 case SVM_EXIT_EXCEPTION_17: case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19:
3619 case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B: case SVM_EXIT_EXCEPTION_1C:
3620 case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
3621 {
3622 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3623 SVMEVENT Event;
3624 Event.u = 0;
3625 Event.n.u1Valid = 1;
3626 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3627 Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
3628
3629 switch (Event.n.u8Vector)
3630 {
3631 case X86_XCPT_DE:
3632 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
3633 break;
3634
3635 case X86_XCPT_BP:
3636 /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
3637 * next instruction. */
3638 /** @todo Investigate this later. */
3639 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
3640 break;
3641
3642 case X86_XCPT_UD:
3643 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
3644 break;
3645
3646 case X86_XCPT_NP:
3647 Event.n.u1ErrorCodeValid = 1;
3648 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3649 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
3650 break;
3651
3652 case X86_XCPT_SS:
3653 Event.n.u1ErrorCodeValid = 1;
3654 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3655 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
3656 break;
3657
3658 case X86_XCPT_GP:
3659 Event.n.u1ErrorCodeValid = 1;
3660 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3661 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
3662 break;
3663
3664 default:
3665 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit caused by exception %#x\n", Event.n.u8Vector));
3666 pVCpu->hm.s.u32HMError = Event.n.u8Vector;
3667 return VERR_SVM_UNEXPECTED_XCPT_EXIT;
3668 }
3669
3670 Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3671 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3672 return VINF_SUCCESS;
3673 }
3674#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
3675
3676 default:
3677 {
3678 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#x\n", u32ExitCode));
3679 pVCpu->hm.s.u32HMError = u32ExitCode;
3680 return VERR_SVM_UNKNOWN_EXIT;
3681 }
3682 }
3683 }
3684 }
3685 return VERR_INTERNAL_ERROR_5; /* Should never happen. */
3686}
3687
3688
3689#ifdef DEBUG
3690 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
3691# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
3692 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
3693
3694# define HMSVM_ASSERT_PREEMPT_CPUID() \
3695 do \
3696 { \
3697 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
3698 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
3699 } while (0)
3700
3701# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
3702 do { \
3703 AssertPtr(pVCpu); \
3704 AssertPtr(pCtx); \
3705 AssertPtr(pSvmTransient); \
3706 Assert(ASMIntAreEnabled()); \
3707 HMSVM_ASSERT_PREEMPT_SAFE(); \
3708 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
3709 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
3710 HMSVM_ASSERT_PREEMPT_SAFE(); \
3711 if (VMMR0IsLogFlushDisabled(pVCpu)) \
3712 HMSVM_ASSERT_PREEMPT_CPUID(); \
3713 } while (0)
3714#else /* Release builds */
3715# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { NOREF(pVCpu); NOREF(pCtx); NOREF(pSvmTransient); } while (0)
3716#endif
3717
3718
3719/**
3720 * Worker for hmR0SvmInterpretInvlpg().
3721 *
3722 * @return VBox status code.
3723 * @param pVCpu Pointer to the VMCPU.
3724 * @param pCpu Pointer to the disassembler state.
3725 * @param pRegFrame Pointer to the register frame.
3726 */
3727static int hmR0SvmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame)
3728{
3729 DISQPVPARAMVAL Param1;
3730 RTGCPTR GCPtrPage;
3731
3732 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->Param1, &Param1, DISQPVWHICH_SRC);
3733 if (RT_FAILURE(rc))
3734 return VERR_EM_INTERPRETER;
3735
3736 if ( Param1.type == DISQPV_TYPE_IMMEDIATE
3737 || Param1.type == DISQPV_TYPE_ADDRESS)
3738 {
3739 if (!(Param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64)))
3740 return VERR_EM_INTERPRETER;
3741
3742 GCPtrPage = Param1.val.val64;
3743 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, pRegFrame, GCPtrPage);
3744 rc = VBOXSTRICTRC_VAL(rc2);
3745 }
3746 else
3747 {
3748 Log4(("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type));
3749 rc = VERR_EM_INTERPRETER;
3750 }
3751
3752 return rc;
3753}
3754
3755
3756/**
3757 * Interprets INVLPG.
3758 *
3759 * @returns VBox status code.
3760 * @retval VINF_* Scheduling instructions.
3761 * @retval VERR_EM_INTERPRETER Something we can't cope with.
3762 * @retval VERR_* Fatal errors.
3763 *
3764 * @param pVM Pointer to the VM.
3765 * @param pRegFrame Pointer to the register frame.
3766 *
3767 * @remarks Updates the RIP if the instruction was executed successfully.
3768 */
3769static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
3770{
3771 /* Only allow 32 & 64 bit code. */
3772 if (CPUMGetGuestCodeBits(pVCpu) != 16)
3773 {
3774 PDISSTATE pDis = &pVCpu->hm.s.DisState;
3775 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
3776 if ( RT_SUCCESS(rc)
3777 && pDis->pCurInstr->uOpcode == OP_INVLPG)
3778 {
3779 rc = hmR0SvmInterpretInvlPgEx(pVCpu, pDis, pRegFrame);
3780 if (RT_SUCCESS(rc))
3781 pRegFrame->rip += pDis->cbInstr;
3782 return rc;
3783 }
3784 else
3785 Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
3786 }
3787 return VERR_EM_INTERPRETER;
3788}
3789
3790
3791/**
3792 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
3793 *
3794 * @param pVCpu Pointer to the VMCPU.
3795 */
3796DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu)
3797{
3798 SVMEVENT Event;
3799 Event.u = 0;
3800 Event.n.u1Valid = 1;
3801 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3802 Event.n.u8Vector = X86_XCPT_UD;
3803 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3804}
3805
3806
3807/**
3808 * Sets a debug (#DB) exception as pending-for-injection into the VM.
3809 *
3810 * @param pVCpu Pointer to the VMCPU.
3811 */
3812DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPU pVCpu)
3813{
3814 SVMEVENT Event;
3815 Event.u = 0;
3816 Event.n.u1Valid = 1;
3817 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3818 Event.n.u8Vector = X86_XCPT_DB;
3819 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3820}
3821
3822
3823/**
3824 * Sets a page fault (#PF) exception as pending-for-injection into the VM.
3825 *
3826 * @param pVCpu Pointer to the VMCPU.
3827 * @param pCtx Pointer to the guest-CPU context.
3828 * @param u32ErrCode The error-code for the page-fault.
3829 * @param uFaultAddress The page fault address (CR2).
3830 *
3831 * @remarks This updates the guest CR2 with @a uFaultAddress!
3832 */
3833DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3834{
3835 SVMEVENT Event;
3836 Event.u = 0;
3837 Event.n.u1Valid = 1;
3838 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3839 Event.n.u8Vector = X86_XCPT_PF;
3840 Event.n.u1ErrorCodeValid = 1;
3841 Event.n.u32ErrorCode = u32ErrCode;
3842
3843 /* Update CR2 of the guest. */
3844 if (pCtx->cr2 != uFaultAddress)
3845 {
3846 pCtx->cr2 = uFaultAddress;
3847 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2);
3848 }
3849
3850 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3851}
3852
3853
3854/**
3855 * Sets a device-not-available (#NM) exception as pending-for-injection into the
3856 * VM.
3857 *
3858 * @param pVCpu Pointer to the VMCPU.
3859 */
3860DECLINLINE(void) hmR0SvmSetPendingXcptNM(PVMCPU pVCpu)
3861{
3862 SVMEVENT Event;
3863 Event.u = 0;
3864 Event.n.u1Valid = 1;
3865 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3866 Event.n.u8Vector = X86_XCPT_NM;
3867 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3868}
3869
3870
3871/**
3872 * Sets a math-fault (#MF) exception as pending-for-injection into the VM.
3873 *
3874 * @param pVCpu Pointer to the VMCPU.
3875 */
3876DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPU pVCpu)
3877{
3878 SVMEVENT Event;
3879 Event.u = 0;
3880 Event.n.u1Valid = 1;
3881 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3882 Event.n.u8Vector = X86_XCPT_MF;
3883 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3884}
3885
3886
3887/**
3888 * Sets a double fault (#DF) exception as pending-for-injection into the VM.
3889 *
3890 * @param pVCpu Pointer to the VMCPU.
3891 */
3892DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
3893{
3894 SVMEVENT Event;
3895 Event.u = 0;
3896 Event.n.u1Valid = 1;
3897 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3898 Event.n.u8Vector = X86_XCPT_DF;
3899 Event.n.u1ErrorCodeValid = 1;
3900 Event.n.u32ErrorCode = 0;
3901 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3902}
3903
3904
3905/**
3906 * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
3907 * guests. This simply looks up the patch record at EIP and performs the required TPR access.
3908 *
3909 * This VMMCALL is used as a fallback mechanism when mov to/from cr8 isn't exactly
3910 * like how we want it to be (e.g. not followed by shr 4 as is usually done for
3911 * TPR). See hmR3ReplaceTprInstr() for the details.
3912 *
3913 * @returns VBox status code.
3914 * @retval VINF_SUCCESS if the access was handled successfully.
3915 * @retval VERR_NOT_FOUND if no patch record for this eip could be found.
3916 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
3917 *
3918 * @param pVM Pointer to the VM.
3919 * @param pVCpu Pointer to the VMCPU.
3920 * @param pCtx Pointer to the guest-CPU context.
3921 */
3922static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3923{
3924 Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
3925
3926 /*
3927 * We do this in a loop as we increment the RIP after a successful emulation
3928 * and the new RIP may be a patched instruction which needs emulation as well.
3929 */
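    /* Each patch record describes the original, now patched-out instruction: e.g. an HMTPRINSTR_READ record
       stands for something like 'mov <reg>, cr8' with the destination register in uDstOperand, while the
       WRITE variants carry the source register or immediate to be written to the TPR. */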
3930 bool fPatchFound = false;
3931 for (;;)
3932 {
3933 bool fPending;
3934 uint8_t u8Tpr;
3935
3936 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
3937 if (!pPatch)
3938 break;
3939
3940 fPatchFound = true;
3941 switch (pPatch->enmType)
3942 {
3943 case HMTPRINSTR_READ:
3944 {
3945 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
3946 AssertRC(rc);
3947
3948 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
3949 AssertRC(rc);
3950 pCtx->rip += pPatch->cbOp;
3951 break;
3952 }
3953
3954 case HMTPRINSTR_WRITE_REG:
3955 case HMTPRINSTR_WRITE_IMM:
3956 {
3957 if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
3958 {
3959 uint32_t u32Val;
3960 int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
3961 AssertRC(rc);
3962 u8Tpr = u32Val;
3963 }
3964 else
3965 u8Tpr = (uint8_t)pPatch->uSrcOperand;
3966
3967 int rc2 = PDMApicSetTPR(pVCpu, u8Tpr);
3968 AssertRC(rc2);
3969 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
3970
3971 pCtx->rip += pPatch->cbOp;
3972 break;
3973 }
3974
3975 default:
3976 AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
3977 pVCpu->hm.s.u32HMError = pPatch->enmType;
3978 return VERR_SVM_UNEXPECTED_PATCH_TYPE;
3979 }
3980 }
3981
3982 if (fPatchFound)
3983 return VINF_SUCCESS;
3984 return VERR_NOT_FOUND;
3985}
3986
3987
3988/**
3989 * Determines if an exception is a contributory exception. Contributory
3990 * exceptions are ones which can cause double-faults. Page-fault is
3991 * intentionally not included here as it's a conditional contributory exception.
3992 *
3993 * @returns true if the exception is contributory, false otherwise.
3994 * @param uVector The exception vector.
3995 */
3996DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)
3997{
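    /* E.g. a #GP raised while delivering a #NP (both contributory) yields a #DF; #PF is only conditionally
       contributory and is therefore handled separately in hmR0SvmCheckExitDueToEventDelivery(). */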
3998 switch (uVector)
3999 {
4000 case X86_XCPT_GP:
4001 case X86_XCPT_SS:
4002 case X86_XCPT_NP:
4003 case X86_XCPT_TS:
4004 case X86_XCPT_DE:
4005 return true;
4006 default:
4007 break;
4008 }
4009 return false;
4010}
4011
4012
4013/**
4014 * Handle a condition that occurred while delivering an event through the guest
4015 * IDT.
4016 *
4017 * @returns VBox status code (informational error codes included).
4018 * @retval VINF_SUCCESS if we should continue handling the #VMEXIT.
4019 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4020 *          continue execution of the guest which will deliver the #DF.
4021 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4022 *
4023 * @param pVCpu Pointer to the VMCPU.
4024 * @param pCtx Pointer to the guest-CPU context.
4025 * @param pSvmTransient Pointer to the SVM transient structure.
4026 *
4027 * @remarks No-long-jump zone!!!
4028 */
4029static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4030{
4031 int rc = VINF_SUCCESS;
4032 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4033
4034 /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". The EXITINTINFO (if valid) contains the prior exception (IDT vector)
4035     * that was being delivered to the guest when the intercepted #VMEXIT (exit vector) occurred. */
4036 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
4037 {
4038 uint8_t uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
4039
4040 typedef enum
4041 {
4042 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4043 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4044 SVMREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4045 SVMREFLECTXCPT_NONE /* Nothing to reflect. */
4046 } SVMREFLECTXCPT;
4047
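        /* Roughly: a #PF while delivering a #PF is flagged as a vectoring double-#PF for the #PF handler to deal
           with; a contributory exception on top of a contributory exception (or a #PF) becomes a #DF; anything
           occurring while a #DF is being delivered means a triple fault; everything else is simply re-injected. */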
4048 SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
4049 bool fReflectingNmi = false;
4050 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
4051 {
4052 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
4053 {
4054 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
4055
4056#ifdef VBOX_STRICT
4057 if ( hmR0SvmIsContributoryXcpt(uIdtVector)
4058 && uExitVector == X86_XCPT_PF)
4059 {
4060                    Log4(("IDT: Contributory #PF idCpu=%u uCR2=%#RX64\n", pVCpu->idCpu, pCtx->cr2));
4061 }
4062#endif
4063 if ( uExitVector == X86_XCPT_PF
4064 && uIdtVector == X86_XCPT_PF)
4065 {
4066 pSvmTransient->fVectoringDoublePF = true;
4067 Log4(("IDT: Vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));
4068 }
4069 else if ( (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
4070 && hmR0SvmIsContributoryXcpt(uExitVector)
4071 && ( hmR0SvmIsContributoryXcpt(uIdtVector)
4072 || uIdtVector == X86_XCPT_PF))
4073 {
4074 enmReflect = SVMREFLECTXCPT_DF;
4075 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
4076 uIdtVector, uExitVector));
4077 }
4078 else if (uIdtVector == X86_XCPT_DF)
4079 {
4080 enmReflect = SVMREFLECTXCPT_TF;
4081 Log4(("IDT: Pending vectoring triple-fault %#RX64 uIdtVector=%#x uExitVector=%#x\n",
4082 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
4083 }
4084 else
4085 enmReflect = SVMREFLECTXCPT_XCPT;
4086 }
4087 else
4088 {
4089 /*
4090 * If event delivery caused an #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
4091 * exception to the guest after handling the #VMEXIT.
4092 */
4093 enmReflect = SVMREFLECTXCPT_XCPT;
4094 }
4095 }
4096 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
4097 || pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
4098 {
4099 enmReflect = SVMREFLECTXCPT_XCPT;
4100 fReflectingNmi = RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI);
4101
4102 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
4103 {
4104 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
4105 if (uExitVector == X86_XCPT_PF)
4106 {
4107 pSvmTransient->fVectoringPF = true;
4108 Log4(("IDT: Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));
4109 }
4110 }
4111 }
4112 /* else: Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */
4113
4114 switch (enmReflect)
4115 {
4116 case SVMREFLECTXCPT_XCPT:
4117 {
4118 /* If we are re-injecting the NMI, clear NMI blocking. */
4119 if (fReflectingNmi)
4120 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
4121
4122 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
4123 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, 0 /* GCPtrFaultAddress */);
4124
4125 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
4126 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
4127 !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
4128 break;
4129 }
4130
4131 case SVMREFLECTXCPT_DF:
4132 {
4133 hmR0SvmSetPendingXcptDF(pVCpu);
4134 rc = VINF_HM_DOUBLE_FAULT;
4135 break;
4136 }
4137
4138 case SVMREFLECTXCPT_TF:
4139 {
4140 rc = VINF_EM_RESET;
4141 break;
4142 }
4143
4144 default:
4145 Assert(rc == VINF_SUCCESS);
4146 break;
4147 }
4148 }
4149 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
4150 NOREF(pCtx);
4151 return rc;
4152}
4153
4154
4155/**
4156 * Advances the guest RIP using the NRIP value if the NRIP_SAVE feature is supported by the
4157 * CPU, otherwise advances the RIP by @a cb bytes.
4158 *
4159 * @param pVCpu Pointer to the VMCPU.
4160 * @param pCtx Pointer to the guest-CPU context.
4161 * @param cb RIP increment value in bytes.
4162 *
4163 * @remarks Use this function only from #VMEXIT's where the NRIP value is valid
4164 * when NRIP_SAVE is supported by the CPU!
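 *          For example, the CPUID intercept handler passes 2 as the fallback instruction length.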
4165 */
4166DECLINLINE(void) hmR0SvmUpdateRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
4167{
4168 if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4169 {
4170 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4171 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4172 }
4173 else
4174 pCtx->rip += cb;
4175}
4176
4177
4178/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
4179/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
4180/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
4181
4182/** @name #VMEXIT handlers.
4183 * @{
4184 */
4185
4186/**
4187 * #VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
4188 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
4189 */
4190HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4191{
4192 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4193
4194 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
4195 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
4196 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
4197 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
4198
4199 /*
4200 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to signal -before- the timer
4201 * fires whether the current interrupt is our own timer or some other host interrupt. We also cannot examine what
4202 * interrupt it is until the host actually takes the interrupt.
4203 *
4204 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an
4205 * AMD Phenom 9850 Quad-Core on Windows 64-bit host).
4206 */
4207 return VINF_EM_RAW_INTERRUPT;
4208}
4209
4210
4211/**
4212 * #VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional #VMEXIT.
4213 */
4214HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4215{
4216 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4217
4218 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4219 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
4220 int rc = VINF_SUCCESS;
4221 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4222 return rc;
4223}
4224
4225
4226/**
4227 * #VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional #VMEXIT.
4228 */
4229HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4230{
4231 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4232
4233 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4234 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
4235 int rc = VINF_SUCCESS;
4236 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4237 return rc;
4238}
4239
4240
4241/**
4242 * #VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional #VMEXIT.
4243 */
4244HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4245{
4246 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4247 PVM pVM = pVCpu->CTX_SUFF(pVM);
4248 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4249 if (RT_LIKELY(rc == VINF_SUCCESS))
4250 {
4251 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4252 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4253 }
4254 else
4255 {
4256 AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
4257 rc = VERR_EM_INTERPRETER;
4258 }
4259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
4260 return rc;
4261}
4262
4263
4264/**
4265 * #VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional #VMEXIT.
4266 */
4267HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4268{
4269 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4270 PVM pVM = pVCpu->CTX_SUFF(pVM);
4271 int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4272 if (RT_LIKELY(rc == VINF_SUCCESS))
4273 {
4274 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4275 pSvmTransient->fUpdateTscOffsetting = true;
4276
4277 /* Single step check. */
4278 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4279 }
4280 else
4281 {
4282 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
4283 rc = VERR_EM_INTERPRETER;
4284 }
4285 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
4286 return rc;
4287}
4288
4289
4290/**
4291 * #VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional #VMEXIT.
4292 */
4293HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4294{
4295 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4296 int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
4297 if (RT_LIKELY(rc == VINF_SUCCESS))
4298 {
4299 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4300 pSvmTransient->fUpdateTscOffsetting = true;
4301 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4302 }
4303 else
4304 {
4305        AssertMsgFailed(("hmR0SvmExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
4306 rc = VERR_EM_INTERPRETER;
4307 }
4308 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
4309 return rc;
4310}
4311
4312
4313/**
4314 * #VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional #VMEXIT.
4315 */
4316HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4317{
4318 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4319 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4320 if (RT_LIKELY(rc == VINF_SUCCESS))
4321 {
4322 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4323 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4324 }
4325 else
4326 {
4327 AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
4328 rc = VERR_EM_INTERPRETER;
4329 }
4330 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
4331 return rc;
4332}
4333
4334
4335/**
4336 * #VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional #VMEXIT.
4337 */
4338HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4339{
4340 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4341 PVM pVM = pVCpu->CTX_SUFF(pVM);
4342 Assert(!pVM->hm.s.fNestedPaging);
4343
4344 /** @todo Decode Assist. */
4345 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx)); /* Updates RIP if successful. */
4346 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
4347 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
4348 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4349 return rc;
4350}
4351
4352
4353/**
4354 * #VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional #VMEXIT.
4355 */
4356HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4357{
4358 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4359 hmR0SvmUpdateRip(pVCpu, pCtx, 1);
4360 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT;
4361 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4362 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
4363 return rc;
4364}
4365
4366
4367/**
4368 * #VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional #VMEXIT.
4369 */
4370HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4371{
4372 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4373 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4374 if (RT_LIKELY(rc == VINF_SUCCESS))
4375 {
4376 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4377 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4378 }
4379 else
4380 {
4381 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
4382 rc = VERR_EM_INTERPRETER;
4383 }
4384 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
4385 return rc;
4386}
4387
4388
4389/**
4390 * #VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional #VMEXIT.
4391 */
4392HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4393{
4394 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4395 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
4396 int rc = VBOXSTRICTRC_VAL(rc2);
4397 if ( rc == VINF_EM_HALT
4398 || rc == VINF_SUCCESS)
4399 {
4400 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
4401
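        /* If EM decides the guest should not really halt (a wake-up condition is already pending), convert
           VINF_EM_HALT back to VINF_SUCCESS and keep executing the guest. */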
4402 if ( rc == VINF_EM_HALT
4403 && EMMonitorWaitShouldContinue(pVCpu, pCtx))
4404 {
4405 rc = VINF_SUCCESS;
4406 }
4407 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4408 }
4409 else
4410 {
4411 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
4412 rc = VERR_EM_INTERPRETER;
4413 }
4414 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
4415 ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
4416 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
4417 return rc;
4418}
4419
4420
4421/**
4422 * #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN).
4423 * Conditional #VMEXIT.
4424 */
4425HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4426{
4427 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4428 return VINF_EM_RESET;
4429}
4430
4431
4432/**
4433 * #VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional #VMEXIT.
4434 */
4435HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4436{
4437 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4438
4439 Log4(("hmR0SvmExitReadCRx: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
4440
4441 /** @todo Decode Assist. */
4442 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4443 int rc = VBOXSTRICTRC_VAL(rc2);
4444 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3,
4445 ("hmR0SvmExitReadCRx: EMInterpretInstruction failed rc=%Rrc\n", rc));
4446 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
4447 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);
4448 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4449 return rc;
4450}
4451
4452
4453/**
4454 * #VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional #VMEXIT.
4455 */
4456HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4457{
4458 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4459 /** @todo Decode Assist. */
4460 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4461 int rc = VBOXSTRICTRC_VAL(rc2);
4462 if (rc == VINF_SUCCESS)
4463 {
4464 /* RIP has been updated by EMInterpretInstruction(). */
4465 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0) <= 15);
4466 switch (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)
4467 {
4468 case 0: /* CR0. */
4469 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
4470 break;
4471
4472 case 3: /* CR3. */
4473 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
4474 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
4475 break;
4476
4477 case 4: /* CR4. */
4478 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
4479 break;
4480
4481 case 8: /* CR8 (TPR). */
4482 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4483 break;
4484
4485 default:
4486                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 CRx=%#RX64\n",
4487                                 pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0));
4488 break;
4489 }
4490 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4491 }
4492 else
4493 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
4494 return rc;
4495}
4496
4497
4498/**
4499 * #VMEXIT handler for instructions that result in a #UD exception delivered to
4500 * the guest.
4501 */
4502HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4503{
4504 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4505 hmR0SvmSetPendingXcptUD(pVCpu);
4506 return VINF_SUCCESS;
4507}
4508
4509
4510/**
4511 * #VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional #VMEXIT.
4512 */
4513HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4514{
4515 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4516 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4517 PVM pVM = pVCpu->CTX_SUFF(pVM);
4518
4519 int rc;
4520 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4521 {
4522 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
4523
4524 /* Handle TPR patching; intercepted LSTAR write. */
4525 if ( pVM->hm.s.fTPRPatchingActive
4526 && pCtx->ecx == MSR_K8_LSTAR)
4527 {
4528 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
4529 {
4530 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
4531 int rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
4532 AssertRC(rc2);
4533 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4534 }
4535 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4536 rc = VINF_SUCCESS;
4537 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4538 return rc;
4539 }
4540
4541 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4542 {
4543 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4544 if (RT_LIKELY(rc == VINF_SUCCESS))
4545 {
4546 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4547 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4548 }
4549 else
4550 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
4551 }
4552 else
4553 {
4554 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
4555 if (RT_LIKELY(rc == VINF_SUCCESS))
4556 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); /* RIP updated by EMInterpretInstruction(). */
4557 else
4558 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4559 }
4560
4561 if (rc == VINF_SUCCESS)
4562 {
4563 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
4564 if ( pCtx->ecx >= MSR_IA32_X2APIC_START
4565 && pCtx->ecx <= MSR_IA32_X2APIC_END)
4566 {
4567 /*
4568 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). When full APIC register
4569 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before
4570 * EMInterpretWrmsr() changes it.
4571 */
4572 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4573 }
4574 else if (pCtx->ecx == MSR_K6_EFER)
4575 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4576 else if (pCtx->ecx == MSR_IA32_TSC)
4577 pSvmTransient->fUpdateTscOffsetting = true;
4578 }
4579 }
4580 else
4581 {
4582 /* MSR Read access. */
4583 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
4584 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
4585
4586 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4587 {
4588 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4589 if (RT_LIKELY(rc == VINF_SUCCESS))
4590 {
4591 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4592 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4593 }
4594 else
4595 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
4596 }
4597 else
4598 {
4599 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
4600 if (RT_UNLIKELY(rc != VINF_SUCCESS))
4601 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: RdMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4602 /* RIP updated by EMInterpretInstruction(). */
4603 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4604 }
4605 }
4606
4607 /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
4608 return rc;
4609}
4610
4611
4612/**
4613 * #VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional #VMEXIT.
4614 */
4615HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4616{
4617 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4618 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
4619
4620 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
4621 if (pSvmTransient->fWasGuestDebugStateActive)
4622 {
4623 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
4624 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
4625 return VERR_SVM_UNEXPECTED_EXIT;
4626 }
4627
4628 /*
4629 * Lazy DR0-3 loading.
4630 */
4631 if (!pSvmTransient->fWasHyperDebugStateActive)
4632 {
4633 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
4634 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
4635
4636 /* Don't intercept DRx read and writes. */
4637 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4638 pVmcb->ctrl.u16InterceptRdDRx = 0;
4639 pVmcb->ctrl.u16InterceptWrDRx = 0;
4640 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
4641
4642 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
4643 VMMRZCallRing3Disable(pVCpu);
4644 HM_DISABLE_PREEMPT_IF_NEEDED();
4645
4646 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
4647 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
4648 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
4649
4650 HM_RESTORE_PREEMPT_IF_NEEDED();
4651 VMMRZCallRing3Enable(pVCpu);
4652
4653 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
4654 return VINF_SUCCESS;
4655 }
4656
4657 /*
4658 * Interpret the read/writing of DRx.
4659 */
4660 /** @todo Decode assist. */
4661 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4662 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
4663 if (RT_LIKELY(rc == VINF_SUCCESS))
4664 {
4665        /* Not necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assist. */
4666 /** @todo CPUM should set this flag! */
4667 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
4668 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4669 }
4670 else
4671 Assert(rc == VERR_EM_INTERPRETER);
4672 return VBOXSTRICTRC_TODO(rc);
4673}
4674
4675
4676/**
4677 * #VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional #VMEXIT.
4678 */
4679HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4680{
4681 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4682    /* For now it's the same since we interpret the instruction anyway. Will change when Decode Assist is implemented. */
4683 int rc = hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
4684 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
4685 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
4686 return rc;
4687}
4688
4689
4690/**
4691 * #VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional #VMEXIT.
4692 */
4693HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4694{
4695 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4696
4697 /* I/O operation lookup arrays. */
4698 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
4699 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
4700 the result (in AL/AX/EAX). */
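    /* Per the AMD spec's IOIO EXITINFO1 layout, bits 4-6 are one-hot operand-size flags (SZ8/SZ16/SZ32), so
       uIOWidth below is 1, 2 or 4 and indexes straight into these tables; e.g. a 16-bit OUT yields cbValue=2
       and uAndVal=0xffff. */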
4701 Log4(("hmR0SvmExitIOInstr: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
4702
4703 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4704 PVM pVM = pVCpu->CTX_SUFF(pVM);
4705
4706 /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
4707 SVMIOIOEXIT IoExitInfo;
4708 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
4709 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
4710 uint32_t cbValue = s_aIOSize[uIOWidth];
4711 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
4712
4713 if (RT_UNLIKELY(!cbValue))
4714 {
4715 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
4716 return VERR_EM_INTERPRETER;
4717 }
4718
4719 VBOXSTRICTRC rcStrict;
4720 if (IoExitInfo.n.u1STR)
4721 {
4722 /* INS/OUTS - I/O String instruction. */
4723 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
4724
4725     /** @todo Huh? Why can't we use the segment prefix information given by AMD-V
4726 * in EXITINFO1? Investigate once this thing is up and running. */
4727
4728 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
4729 if (rcStrict == VINF_SUCCESS)
4730 {
4731 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4732 {
4733 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4734 (DISCPUMODE)pDis->uAddrMode, cbValue);
4735 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
4736 }
4737 else
4738 {
4739 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4740 (DISCPUMODE)pDis->uAddrMode, cbValue);
4741 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
4742 }
4743 }
4744 else
4745 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
4746 }
4747 else
4748 {
4749 /* IN/OUT - I/O instruction. */
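        /* A REP prefix is only meaningful for string I/O (INS/OUTS), so it is never set for plain IN/OUT exits. */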
4750 Assert(!IoExitInfo.n.u1REP);
4751
4752 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4753 {
4754 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
4755 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4756 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4757
4758 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
4759 }
4760 else
4761 {
4762 uint32_t u32Val = 0;
4763
4764 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
4765 if (IOM_SUCCESS(rcStrict))
4766 {
4767 /* Save result of I/O IN instr. in AL/AX/EAX. */
4768 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
4769 }
4770 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4771 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4772
4773 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
4774 }
4775 }
4776
4777 if (IOM_SUCCESS(rcStrict))
4778 {
4779 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
4780 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
4781
4782 /*
4783 * If any I/O breakpoints are armed, we need to check if one triggered
4784 * and take appropriate action.
4785 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
4786 */
4787 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
4788 * execution engines about whether hyper BPs and such are pending. */
4789 uint32_t const uDr7 = pCtx->dr[7];
4790 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
4791 && X86_DR7_ANY_RW_IO(uDr7)
4792 && (pCtx->cr4 & X86_CR4_DE))
4793 || DBGFBpIsHwIoArmed(pVM)))
4794 {
4795 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
4796 VMMRZCallRing3Disable(pVCpu);
4797 HM_DISABLE_PREEMPT_IF_NEEDED();
4798
4799 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
4800 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
4801
4802 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
4803 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
4804 {
4805 /* Raise #DB. */
4806 pVmcb->guest.u64DR6 = pCtx->dr[6];
4807 pVmcb->guest.u64DR7 = pCtx->dr[7];
4808 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
4809 hmR0SvmSetPendingXcptDB(pVCpu);
4810 }
4811 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
4812 else if ( rcStrict2 != VINF_SUCCESS
4813 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
4814 rcStrict = rcStrict2;
4815
4816 HM_RESTORE_PREEMPT_IF_NEEDED();
4817 VMMRZCallRing3Enable(pVCpu);
4818 }
4819
4820 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
4821 }
4822
4823#ifdef VBOX_STRICT
4824 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4825 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
4826 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4827 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
4828 else
4829 {
4830 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
4831 * statuses, that the VMM device and some others may return. See
4832 * IOM_SUCCESS() for guidance. */
4833 AssertMsg( RT_FAILURE(rcStrict)
4834 || rcStrict == VINF_SUCCESS
4835 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
4836 || rcStrict == VINF_EM_DBG_BREAKPOINT
4837 || rcStrict == VINF_EM_RAW_GUEST_TRAP
4838 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4839 }
4840#endif
4841 return VBOXSTRICTRC_TODO(rcStrict);
4842}
4843
4844
4845/**
4846 * #VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional
4847 * #VMEXIT.
4848 */
4849HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4850{
4851 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4852 PVM pVM = pVCpu->CTX_SUFF(pVM);
4853 Assert(pVM->hm.s.fNestedPaging);
4854
4855 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4856
4857 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
4858 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4859 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
4860 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
4861
4862 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
4863
4864#ifdef VBOX_HM_WITH_GUEST_PATCHING
4865 /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
4866 if ( pVM->hm.s.fTprPatchingAllowed
4867 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == 0x80 /* TPR offset. */
4868 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
4869 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
4870 && !CPUMIsGuestInLongModeEx(pCtx)
4871 && !CPUMGetGuestCPL(pVCpu)
4872 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
4873 {
4874 RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
4875 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4876
4877 if (GCPhysFaultAddr == GCPhysApicBase + 0x80)
4878 {
4879 /* Only attempt to patch the instruction once. */
4880 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
4881 if (!pPatch)
4882 return VINF_EM_HM_PATCH_TPR_INSTR;
4883 }
4884 }
4885#endif
4886
4887 /*
4888 * Determine the nested paging mode.
4889 */
4890 PGMMODE enmNestedPagingMode;
4891#if HC_ARCH_BITS == 32
4892 if (CPUMIsGuestInLongModeEx(pCtx))
4893 enmNestedPagingMode = PGMMODE_AMD64_NX;
4894 else
4895#endif
4896 enmNestedPagingMode = PGMGetHostMode(pVM);
4897
4898 /*
4899 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
4900 */
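    /* When both P and RSVD are set in the error code, the #NPF hit a page deliberately flagged as MMIO using the
       reserved bit; let PGM handle the access directly (typically by interpreting the MMIO instruction). */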
4901 int rc;
4902 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
4903 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
4904 {
4905 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
4906 u32ErrCode);
4907 rc = VBOXSTRICTRC_VAL(rc2);
4908
4909 /*
4910 * If we succeed, resume guest execution.
4911      * If we fail to interpret the instruction because we couldn't get the guest physical address
4912      * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
4913      * in the host TLB), resume execution, which will cause a guest page fault and let the guest handle this
4914 * weird case. See @bugref{6043}.
4915 */
4916 if ( rc == VINF_SUCCESS
4917 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4918 || rc == VERR_PAGE_NOT_PRESENT)
4919 {
4920 /* Successfully handled MMIO operation. */
4921 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4922 rc = VINF_SUCCESS;
4923 }
4924 return rc;
4925 }
4926
4927 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
4928 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
4929 TRPMResetTrap(pVCpu);
4930
4931 Log4(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
4932
4933 /*
4934 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
4935 */
4936 if ( rc == VINF_SUCCESS
4937 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4938 || rc == VERR_PAGE_NOT_PRESENT)
4939 {
4940 /* We've successfully synced our shadow page tables. */
4941 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
4942 rc = VINF_SUCCESS;
4943 }
4944
4945 return rc;
4946}
4947
4948
4949/**
4950 * #VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional #VMEXIT.
4951 */
4952HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4953{
4954 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4955
4956 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4957 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 0; /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
4958 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
4959
4960     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs; it is now ready. */
4961 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
4962 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
4963
4964 /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
4965 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
4966 return VINF_SUCCESS;
4967}
4968
4969
4970/**
4971 * #VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional #VMEXIT.
4972 */
4973HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4974{
4975 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4976
4977#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
4978 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
4979#endif
4980
4981     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
4982 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4983 if ( !(pVmcb->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
4984 && pVCpu->hm.s.Event.fPending) /** @todo fPending cannot be 'true', see hmR0SvmInjectPendingEvent(). See @bugref{7362}.*/
4985 {
4986 /*
4987 * AMD-V does not provide us with the original exception but we have it in u64IntInfo since we
4988 * injected the event during VM-entry.
4989 */
4990 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery.\n"));
4991 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
4992 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4993 }
4994
4995 /** @todo Emulate task switch someday, currently just going back to ring-3 for
4996 * emulation. */
4997 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
4998 return VERR_EM_INTERPRETER;
4999}
5000
5001
5002/**
5003 * #VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional #VMEXIT.
5004 */
5005HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5006{
5007 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5008
5009 /* First check if this is a patched VMMCALL for mov TPR */
5010 int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
5011 if (rc == VINF_SUCCESS)
5012 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
5013 else if (rc == VERR_NOT_FOUND)
5014 {
5015 /* Handle GIM provider hypercalls. */
5016 rc = VERR_NOT_SUPPORTED;
5017 if (GIMAreHypercallsEnabled(pVCpu))
5018 rc = GIMHypercall(pVCpu, pCtx);
5019 }
5020
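    /* Neither a TPR-patch VMMCALL nor a handled hypercall: raise #UD in the guest, matching the #UD a VMMCALL causes
       when no hypervisor handles it. */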
5021 if (rc != VINF_SUCCESS)
5022 hmR0SvmSetPendingXcptUD(pVCpu);
5023 return VINF_SUCCESS;
5024}
5025
5026
5027/**
5028 * #VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional #VMEXIT.
5029 */
5030HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5031{
5032 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5033
5034 /* Clear NMI blocking. */
5035 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5036
5037     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs; it is now ready. */
5038 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5039 hmR0SvmClearIretIntercept(pVmcb);
5040
5041 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
5042 return VINF_SUCCESS;
5043}
5044
5045
5046/**
5047 * #VMEXIT handler for page-fault exceptions (SVM_EXIT_EXCEPTION_E). Conditional
5048 * #VMEXIT.
5049 */
5050HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5051{
5052 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5053
5054 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5055
5056 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
5057 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5058 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
5059 RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
5060 PVM pVM = pVCpu->CTX_SUFF(pVM);
5061
5062#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
5063 if (pVM->hm.s.fNestedPaging)
5064 {
5065 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
5066 if (!pSvmTransient->fVectoringDoublePF)
5067 {
5068 /* A genuine guest #PF, reflect it to the guest. */
5069 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
5070 Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
5071 uFaultAddress, u32ErrCode));
5072 }
5073 else
5074 {
5075 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5076 hmR0SvmSetPendingXcptDF(pVCpu);
5077 Log4(("Pending #DF due to vectoring #PF. NP\n"));
5078 }
5079 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
5080 return VINF_SUCCESS;
5081 }
5082#endif
5083
5084 Assert(!pVM->hm.s.fNestedPaging);
5085
5086#ifdef VBOX_HM_WITH_GUEST_PATCHING
5087 /* Shortcut for APIC TPR reads and writes; only applicable to 32-bit guests. */
5088 if ( pVM->hm.s.fTprPatchingAllowed
5089 && (uFaultAddress & 0xfff) == 0x80 /* TPR offset. */
5090 && !(u32ErrCode & X86_TRAP_PF_P) /* Not present. */
5091 && !CPUMIsGuestInLongModeEx(pCtx)
5092 && !CPUMGetGuestCPL(pVCpu)
5093 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
5094 {
5095 RTGCPHYS GCPhysApicBase;
5096 GCPhysApicBase = pCtx->msrApicBase;
5097 GCPhysApicBase &= PAGE_BASE_GC_MASK;
5098
5099 /* Check if the page at the fault-address is the APIC base. */
5100 RTGCPHYS GCPhysPage;
5101 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
5102 if ( rc2 == VINF_SUCCESS
5103 && GCPhysPage == GCPhysApicBase)
5104 {
5105 /* Only attempt to patch the instruction once. */
5106 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
5107 if (!pPatch)
5108 return VINF_EM_HM_PATCH_TPR_INSTR;
5109 }
5110 }
5111#endif
5112
5113 Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
5114 pCtx->rip, u32ErrCode, pCtx->cr3));
5115
5116     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
5117        of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{}. */
5118 if (pSvmTransient->fVectoringPF)
5119 {
5120 Assert(pVCpu->hm.s.Event.fPending);
5121 return VINF_EM_RAW_INJECT_TRPM_EVENT;
5122 }
5123
5124 TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
5125 int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
5126
5127 Log4(("#PF rc=%Rrc\n", rc));
5128
5129 if (rc == VINF_SUCCESS)
5130 {
5131         /* Successfully synced shadow page tables or emulated an MMIO instruction. */
5132 TRPMResetTrap(pVCpu);
5133 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
5134 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
5135 return rc;
5136 }
5137 else if (rc == VINF_EM_RAW_GUEST_TRAP)
5138 {
5139 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
5140
5141 if (!pSvmTransient->fVectoringDoublePF)
5142 {
5143 /* It's a guest page fault and needs to be reflected to the guest. */
5144 u32ErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
5145 TRPMResetTrap(pVCpu);
5146 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
5147 }
5148 else
5149 {
5150 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5151 TRPMResetTrap(pVCpu);
5152 hmR0SvmSetPendingXcptDF(pVCpu);
5153 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
5154 }
5155
5156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
5157 return VINF_SUCCESS;
5158 }
5159
5160 TRPMResetTrap(pVCpu);
5161 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
5162 return rc;
5163}
5164
5165
5166/**
5167 * #VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
5168 * Conditional #VMEXIT.
5169 */
5170HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5171{
5172 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5173
5174 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5175
5176 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
5177 VMMRZCallRing3Disable(pVCpu);
5178 HM_DISABLE_PREEMPT_IF_NEEDED();
5179
5180 int rc;
5181 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
5182 if (pSvmTransient->fWasGuestFPUStateActive)
5183 {
5184 rc = VINF_EM_RAW_GUEST_TRAP;
5185 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
5186 }
5187 else
5188 {
5189#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
5190 Assert(!pSvmTransient->fWasGuestFPUStateActive);
5191#endif
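        /* Lazily load the guest FPU state now; VINF_SUCCESS means it is active and the faulting instruction can be
           restarted, VINF_EM_RAW_GUEST_TRAP means the #NM must be reflected to the guest. */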
5192 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
5193 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
5194 }
5195
5196 HM_RESTORE_PREEMPT_IF_NEEDED();
5197 VMMRZCallRing3Enable(pVCpu);
5198
5199 if (rc == VINF_SUCCESS)
5200 {
5201         /* Guest FPU state was activated; we'll want to change CR0 FPU intercepts before the next VM-reentry. */
5202 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
5203 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
5204 pVCpu->hm.s.fUseGuestFpu = true;
5205 }
5206 else
5207 {
5208 /* Forward #NM to the guest. */
5209 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
5210 hmR0SvmSetPendingXcptNM(pVCpu);
5211 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
5212 }
5213 return VINF_SUCCESS;
5214}
5215
5216
5217/**
5218 * #VMEXIT handler for math-fault exceptions (SVM_EXIT_EXCEPTION_10).
5219 * Conditional #VMEXIT.
5220 */
5221HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5222{
5223 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5224
5225 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5226
5227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
5228
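    /* With CR0.NE clear the guest uses the legacy external x87 error reporting mechanism (FERR# -> IRQ 13) instead of
       taking #MF directly, so emulate that here. */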
5229 if (!(pCtx->cr0 & X86_CR0_NE))
5230 {
5231 PVM pVM = pVCpu->CTX_SUFF(pVM);
5232         PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
5233 unsigned cbOp;
5234 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
5235 if (RT_SUCCESS(rc))
5236 {
5237 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
5238 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
5239 if (RT_SUCCESS(rc))
5240 pCtx->rip += cbOp;
5241 }
5242 else
5243 Log4(("hmR0SvmExitXcptMF: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
5244 return rc;
5245 }
5246
5247 hmR0SvmSetPendingXcptMF(pVCpu);
5248 return VINF_SUCCESS;
5249}
5250
5251
5252/**
5253 * #VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1). Conditional
5254 * #VMEXIT.
5255 */
5256HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5257{
5258 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
5259
5260 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
5261
5262 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
5263
5264 /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
5265 DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
5266 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
5267 PVM pVM = pVCpu->CTX_SUFF(pVM);
5268 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
5269 if (rc == VINF_EM_RAW_GUEST_TRAP)
5270 {
5271 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
5272 if (CPUMIsHyperDebugStateActive(pVCpu))
5273 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
5274
5275 /* Reflect the exception back to the guest. */
5276 hmR0SvmSetPendingXcptDB(pVCpu);
5277 rc = VINF_SUCCESS;
5278 }
5279
5280 /*
5281 * Update DR6.
5282 */
5283 if (CPUMIsHyperDebugStateActive(pVCpu))
5284 {
5285 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
5286 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
5287 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
5288 }
5289 else
5290 {
5291 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
5292 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
5293 }
5294
5295 return rc;
5296}
5297
5298/** @} */
5299