VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@71214

Last change on this file since 71214 was 71188, checked in by vboxsync, 7 years ago

VMM/HMSVMR0: Nested hw.virt: Logging.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 305.5 KB
1/* $Id: HMSVMR0.cpp 71188 2018-03-05 05:30:48Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/asm-amd64-x86.h>
25#include <iprt/thread.h>
26
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/dbgf.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/gim.h>
33#include <VBox/vmm/apic.h>
34#include "HMInternal.h"
35#include <VBox/vmm/vm.h>
36#include "HMSVMR0.h"
37#include "dtrace/VBoxVMM.h"
38
39#define HMSVM_USE_IEM_EVENT_REFLECTION
40#ifdef DEBUG_ramshankar
41# define HMSVM_SYNC_FULL_GUEST_STATE
42# define HMSVM_SYNC_FULL_NESTED_GUEST_STATE
43# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
44# define HMSVM_ALWAYS_TRAP_PF
45# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#ifdef VBOX_WITH_STATISTICS
53# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
54 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
55 if ((u64ExitCode) == SVM_EXIT_NPF) \
56 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
57 else \
58 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
59 } while (0)
60#else
61# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
62#endif
63
64/** If we decide to use a function table approach this can be useful to
65 * switch to a "static DECLCALLBACK(int)". */
66#define HMSVM_EXIT_DECL static int
67
68/** Macro for checking and returning from the calling function for
69 * \#VMEXIT intercepts that may be caused during delivery of another
70 * event in the guest. */
71#ifdef VBOX_WITH_NESTED_HWVIRT
72# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
73 do \
74 { \
75 int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
76 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
77 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
78 else if ( rc == VINF_EM_RESET \
79 && HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
80 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0, 0)); \
81 else \
82 return rc; \
83 } while (0)
84#else
85# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
86 do \
87 { \
88 int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
89 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
90 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
91 else \
92 return rc; \
93 } while (0)
94#endif
95
96/**
97 * Updates interrupt shadow for the current RIP.
98 */
99#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
100 do { \
101 /* Update interrupt shadow. */ \
102 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
103 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
104 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
105 } while (0)
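/* Note: the inhibit-interrupts force-flag models the one-instruction "interrupt shadow"
   typically established when emulating STI, MOV SS or POP SS. It is only meaningful while
   the guest RIP still equals the PC recorded at that time (queried above with
   EMGetInhibitInterruptsPC()); once RIP has moved on, the stale flag is cleared. */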
106
107/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
108 * instruction that exited. */
109#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
110 do { \
111 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
112 (a_rc) = VINF_EM_DBG_STEPPED; \
113 } while (0)
114
115/** Assert that preemption is disabled or covered by thread-context hooks. */
116#define HMSVM_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
117 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
118
119/** Assert that we haven't migrated CPUs when thread-context hooks are not
120 * used. */
121#define HMSVM_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
122 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
123 ("Illegal migration! Entered on CPU %u Current %u\n", \
124 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
125
126/** Assert that we're not executing a nested-guest. */
127#ifdef VBOX_WITH_NESTED_HWVIRT
128# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
129#else
130# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
131#endif
132
133/** Assert that we're executing a nested-guest. */
134#ifdef VBOX_WITH_NESTED_HWVIRT
135# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
136#else
137# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
138#endif
139
140/** Validate segment descriptor granularity bit. */
141#ifdef VBOX_STRICT
142# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
143 AssertMsg( !pMixedCtx->reg.Attr.n.u1Present \
144 || ( pMixedCtx->reg.Attr.n.u1Granularity \
145 ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
146 : pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
147 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
148 pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
149#else
150# define HMSVM_ASSERT_SEG_GRANULARITY(reg) do { } while (0)
151#endif
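/* Background for the check above: when the granularity (G) bit is set the segment limit is
   scaled in 4 KB units, so the low 12 bits of the effective limit always read back as 0xfff;
   when G is clear the limit is byte-granular and must fit in 20 bits (<= 0xfffff). */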
152
153/**
154 * Exception bitmap mask for all contributory exceptions.
155 *
156 * Page fault is deliberately excluded here as it's conditional as to whether
157 * it's contributory or benign. Page faults are handled separately.
158 */
159#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
160 | RT_BIT(X86_XCPT_DE))
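/* Per the x86 exception classification, raising a contributory exception (#DE, #TS, #NP,
   #SS, #GP) while another contributory exception is being delivered escalates to a double
   fault (#DF), which is why these vectors are grouped into one mask here. */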
161
162/**
163 * Mandatory/unconditional guest control intercepts.
164 *
165 * SMIs can and do happen in normal operation. We need not intercept them
166 * while executing the guest or nested-guest.
167 */
168#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS ( SVM_CTRL_INTERCEPT_INTR \
169 | SVM_CTRL_INTERCEPT_NMI \
170 | SVM_CTRL_INTERCEPT_INIT \
171 | SVM_CTRL_INTERCEPT_RDPMC \
172 | SVM_CTRL_INTERCEPT_CPUID \
173 | SVM_CTRL_INTERCEPT_RSM \
174 | SVM_CTRL_INTERCEPT_HLT \
175 | SVM_CTRL_INTERCEPT_IOIO_PROT \
176 | SVM_CTRL_INTERCEPT_MSR_PROT \
177 | SVM_CTRL_INTERCEPT_INVLPGA \
178 | SVM_CTRL_INTERCEPT_SHUTDOWN \
179 | SVM_CTRL_INTERCEPT_FERR_FREEZE \
180 | SVM_CTRL_INTERCEPT_VMRUN \
181 | SVM_CTRL_INTERCEPT_SKINIT \
182 | SVM_CTRL_INTERCEPT_WBINVD \
183 | SVM_CTRL_INTERCEPT_MONITOR \
184 | SVM_CTRL_INTERCEPT_MWAIT \
185 | SVM_CTRL_INTERCEPT_XSETBV)
186
187/** @name VMCB Clean Bits.
188 *
189 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
190 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
191 * memory.
192 *
193 * @{ */
194/** All intercept vectors, TSC offset, PAUSE filter counter. */
195#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
196/** I/O permission bitmap, MSR permission bitmap. */
197#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
198/** ASID. */
199#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
200/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
201V_INTR_VECTOR. */
202#define HMSVM_VMCB_CLEAN_TPR RT_BIT(3)
203/** Nested Paging: Nested CR3 (nCR3), PAT. */
204#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
205/** Control registers (CR0, CR3, CR4, EFER). */
206#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
207/** Debug registers (DR6, DR7). */
208#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
209/** GDT, IDT limit and base. */
210#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
211/** Segment register: CS, SS, DS, ES limit and base. */
212#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
213/** CR2.*/
214#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
215/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
216#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
217/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
218PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
219#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
220/** Mask of all valid VMCB Clean bits. */
221#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
222 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
223 | HMSVM_VMCB_CLEAN_ASID \
224 | HMSVM_VMCB_CLEAN_TPR \
225 | HMSVM_VMCB_CLEAN_NP \
226 | HMSVM_VMCB_CLEAN_CRX_EFER \
227 | HMSVM_VMCB_CLEAN_DRX \
228 | HMSVM_VMCB_CLEAN_DT \
229 | HMSVM_VMCB_CLEAN_SEG \
230 | HMSVM_VMCB_CLEAN_CR2 \
231 | HMSVM_VMCB_CLEAN_LBR \
232 | HMSVM_VMCB_CLEAN_AVIC)
233/** @} */
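/* Usage pattern: whenever code modifies a VMCB area that the CPU may have cached, it must
   clear the corresponding clean bit so the state is re-read from memory on the next VMRUN.
   For example (as done by hmR0SvmAddXcptIntercept() below):
       pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(u32Xcpt);
       pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 */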
234
235/** @name SVM transient.
236 *
237 * A state structure for holding miscellaneous information across AMD-V
238 * VMRUN/\#VMEXIT operation, restored after the transition.
239 *
240 * @{ */
241typedef struct SVMTRANSIENT
242{
243 /** The host's rflags/eflags. */
244 RTCCUINTREG fEFlags;
245#if HC_ARCH_BITS == 32
246 uint32_t u32Alignment0;
247#endif
248
249 /** The \#VMEXIT exit code (the EXITCODE field in the VMCB). */
250 uint64_t u64ExitCode;
251 /** The guest's TPR value used for TPR shadowing. */
252 uint8_t u8GuestTpr;
253 /** Alignment. */
254 uint8_t abAlignment0[7];
255
256 /** Whether the guest FPU state was active at the time of \#VMEXIT. */
257 bool fWasGuestFPUStateActive;
258 /** Whether the guest debug state was active at the time of \#VMEXIT. */
259 bool fWasGuestDebugStateActive;
260 /** Whether the hyper debug state was active at the time of \#VMEXIT. */
261 bool fWasHyperDebugStateActive;
262 /** Whether the TSC offset mode needs to be updated. */
263 bool fUpdateTscOffsetting;
264 /** Whether the TSC_AUX MSR needs restoring on \#VMEXIT. */
265 bool fRestoreTscAuxMsr;
266 /** Whether the \#VMEXIT was caused by a page-fault during delivery of a
 267 * contributory exception or a page-fault. */
268 bool fVectoringDoublePF;
269 /** Whether the \#VMEXIT was caused by a page-fault during delivery of an
270 * external interrupt or NMI. */
271 bool fVectoringPF;
272} SVMTRANSIENT, *PSVMTRANSIENT;
273AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
274AssertCompileMemberAlignment(SVMTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
275/** @} */
276
277/**
278 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
279 */
280typedef enum SVMMSREXITREAD
281{
282 /** Reading this MSR causes a \#VMEXIT. */
283 SVMMSREXIT_INTERCEPT_READ = 0xb,
284 /** Reading this MSR does not cause a \#VMEXIT. */
285 SVMMSREXIT_PASSTHRU_READ
286} SVMMSREXITREAD;
287
288/**
289 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
290 */
291typedef enum SVMMSREXITWRITE
292{
293 /** Writing to this MSR causes a \#VMEXIT. */
294 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
295 /** Writing to this MSR does not cause a \#VMEXIT. */
296 SVMMSREXIT_PASSTHRU_WRITE
297} SVMMSREXITWRITE;
298
299/**
300 * SVM \#VMEXIT handler.
301 *
302 * @returns VBox status code.
303 * @param pVCpu The cross context virtual CPU structure.
304 * @param pMixedCtx Pointer to the guest-CPU context.
305 * @param pSvmTransient Pointer to the SVM-transient structure.
306 */
307typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
308
309
310/*********************************************************************************************************************************
311* Internal Functions *
312*********************************************************************************************************************************/
313static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
314 SVMMSREXITWRITE enmWrite);
315static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
316static void hmR0SvmLeave(PVMCPU pVCpu);
317
318/** @name \#VMEXIT handlers.
319 * @{
320 */
321static FNSVMEXITHANDLER hmR0SvmExitIntr;
322static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
323static FNSVMEXITHANDLER hmR0SvmExitInvd;
324static FNSVMEXITHANDLER hmR0SvmExitCpuid;
325static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
326static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
327static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
328static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
329static FNSVMEXITHANDLER hmR0SvmExitHlt;
330static FNSVMEXITHANDLER hmR0SvmExitMonitor;
331static FNSVMEXITHANDLER hmR0SvmExitMwait;
332static FNSVMEXITHANDLER hmR0SvmExitShutdown;
333static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
334static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
335static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
336static FNSVMEXITHANDLER hmR0SvmExitMsr;
337static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
338static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
339static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
340static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
341static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
342static FNSVMEXITHANDLER hmR0SvmExitVIntr;
343static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
344static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
345static FNSVMEXITHANDLER hmR0SvmExitPause;
346static FNSVMEXITHANDLER hmR0SvmExitIret;
347static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
348static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
349static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
350static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
351static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
352static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
353static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
354#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT)
355static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
356#endif
357#ifdef VBOX_WITH_NESTED_HWVIRT
358static FNSVMEXITHANDLER hmR0SvmExitXcptPFNested;
359static FNSVMEXITHANDLER hmR0SvmExitClgi;
360static FNSVMEXITHANDLER hmR0SvmExitStgi;
361static FNSVMEXITHANDLER hmR0SvmExitVmload;
362static FNSVMEXITHANDLER hmR0SvmExitVmsave;
363static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
364static FNSVMEXITHANDLER hmR0SvmExitVmrun;
365static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
366static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
367#endif
368/** @} */
369
370static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
371#ifdef VBOX_WITH_NESTED_HWVIRT
372static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
373#endif
374
375
376/*********************************************************************************************************************************
377* Global Variables *
378*********************************************************************************************************************************/
379/** Ring-0 memory object for the IO bitmap. */
380RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
381/** Physical address of the IO bitmap. */
382RTHCPHYS g_HCPhysIOBitmap = 0;
383/** Pointer to the IO bitmap. */
384R0PTRTYPE(void *) g_pvIOBitmap = NULL;
385
386#ifdef VBOX_WITH_NESTED_HWVIRT
387/** Ring-0 memory object for the nested-guest MSRPM bitmap. */
388RTR0MEMOBJ g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
389/** Physical address of the nested-guest MSRPM bitmap. */
390RTHCPHYS g_HCPhysNstGstMsrBitmap = 0;
391/** Pointer to the nested-guest MSRPM bitmap. */
392R0PTRTYPE(void *) g_pvNstGstMsrBitmap = NULL;
393#endif
394
395
396#ifdef VBOX_STRICT
397# define HMSVM_LOG_CS RT_BIT_32(0)
398# define HMSVM_LOG_SS RT_BIT_32(1)
399# define HMSVM_LOG_FS RT_BIT_32(2)
400# define HMSVM_LOG_GS RT_BIT_32(3)
401# define HMSVM_LOG_LBR RT_BIT_32(4)
402# define HMSVM_LOG_ALL ( HMSVM_LOG_CS \
403 | HMSVM_LOG_SS \
404 | HMSVM_LOG_FS \
405 | HMSVM_LOG_GS \
406 | HMSVM_LOG_LBR)
407
408/**
409 * Dumps CPU state and additional info to the logger for diagnostics.
410 *
411 * @param pVCpu The cross context virtual CPU structure.
412 * @param pVmcb Pointer to the VM control block.
413 * @param pCtx Pointer to the guest-CPU context.
414 * @param pszPrefix Log prefix.
415 * @param fFlags Log flags, see HMSVM_LOG_XXX.
416 * @param uVerbose The verbosity level, currently unused.
417 */
418static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,
419 uint8_t uVerbose)
420{
421 RT_NOREF2(pVCpu, uVerbose);
422
423 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip,
424 pCtx->rflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
425 Log4(("%s: rsp=%#RX64 rbp=%#RX64 rdi=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp, pCtx->rdi));
426 if (fFlags & HMSVM_LOG_CS)
427 {
428 Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
429 pCtx->cs.u32Limit, pCtx->cs.Attr.u));
430 }
431 if (fFlags & HMSVM_LOG_SS)
432 {
433 Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
434 pCtx->ss.u32Limit, pCtx->ss.Attr.u));
435 }
436 if (fFlags & HMSVM_LOG_FS)
437 {
438 Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
439 pCtx->fs.u32Limit, pCtx->fs.Attr.u));
440 }
441 if (fFlags & HMSVM_LOG_GS)
442 {
443 Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
444 pCtx->gs.u32Limit, pCtx->gs.Attr.u));
445 }
446
447 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
448 if (fFlags & HMSVM_LOG_LBR)
449 {
450 Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
451 pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
452 }
453 NOREF(pVmcbGuest);
454}
455#endif
456
457
458/**
459 * Sets up and activates AMD-V on the current CPU.
460 *
461 * @returns VBox status code.
462 * @param pCpu Pointer to the CPU info struct.
463 * @param pVM The cross context VM structure. Can be
464 * NULL after a resume!
465 * @param pvCpuPage Pointer to the global CPU page.
466 * @param HCPhysCpuPage Physical address of the global CPU page.
467 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
468 * @param pvArg Unused on AMD-V.
469 */
470VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
471 void *pvArg)
472{
473 Assert(!fEnabledByHost);
474 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
475 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
476 Assert(pvCpuPage); NOREF(pvCpuPage);
477 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
478
479 NOREF(pvArg);
480 NOREF(fEnabledByHost);
481
482 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
483 RTCCUINTREG fEFlags = ASMIntDisableFlags();
484
485 /*
486 * We must turn on AMD-V and set up the host-state physical address, as those MSRs are per CPU.
487 */
488 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
489 if (u64HostEfer & MSR_K6_EFER_SVME)
490 {
491 /* If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
492 if ( pVM
493 && pVM->hm.s.svm.fIgnoreInUseError)
494 {
495 pCpu->fIgnoreAMDVInUseError = true;
496 }
497
498 if (!pCpu->fIgnoreAMDVInUseError)
499 {
500 ASMSetFlags(fEFlags);
501 return VERR_SVM_IN_USE;
502 }
503 }
504
505 /* Turn on AMD-V in the EFER MSR. */
506 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
507
508 /* Write the physical page address where the CPU will store the host state while executing the VM. */
509 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
510
511 /* Restore interrupts. */
512 ASMSetFlags(fEFlags);
513
514 /*
515 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
516 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
517 * upon VMRUN). Therefore, flag that we need to flush the TLB entirely before executing any
518 * guest code.
519 */
520 pCpu->fFlushAsidBeforeUse = true;
521
522 /*
523 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
524 */
525 ++pCpu->cTlbFlushes;
526
527 return VINF_SUCCESS;
528}
529
530
531/**
532 * Deactivates AMD-V on the current CPU.
533 *
534 * @returns VBox status code.
535 * @param pCpu Pointer to the CPU info struct.
536 * @param pvCpuPage Pointer to the global CPU page.
537 * @param HCPhysCpuPage Physical address of the global CPU page.
538 */
539VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
540{
541 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
542 AssertReturn( HCPhysCpuPage
543 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
544 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
545 NOREF(pCpu);
546
547 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
548 RTCCUINTREG fEFlags = ASMIntDisableFlags();
549
550 /* Turn off AMD-V in the EFER MSR. */
551 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
552 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
553
554 /* Invalidate host state physical address. */
555 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
556
557 /* Restore interrupts. */
558 ASMSetFlags(fEFlags);
559
560 return VINF_SUCCESS;
561}
562
563
564/**
565 * Does global AMD-V initialization (called during module initialization).
566 *
567 * @returns VBox status code.
568 */
569VMMR0DECL(int) SVMR0GlobalInit(void)
570{
571 /*
572 * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
573 * once globally here instead of per-VM.
574 */
575 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
576 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
577 if (RT_FAILURE(rc))
578 return rc;
579
580 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
581 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
582
583 /* Set all bits to intercept all IO accesses. */
584 ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
585
586#ifdef VBOX_WITH_NESTED_HWVIRT
587 /*
588 * Allocate 8 KB for the MSR permission bitmap for the nested-guest.
589 */
590 Assert(g_hMemObjNstGstMsrBitmap == NIL_RTR0MEMOBJ);
591 rc = RTR0MemObjAllocCont(&g_hMemObjNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
592 if (RT_FAILURE(rc))
593 return rc;
594
595 g_pvNstGstMsrBitmap = RTR0MemObjAddress(g_hMemObjNstGstMsrBitmap);
596 g_HCPhysNstGstMsrBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjNstGstMsrBitmap, 0 /* iPage */);
597
598 /* Set all bits to intercept all MSR accesses. */
599 ASMMemFill32(g_pvNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
600#endif
601
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * Does global AMD-V termination (called during module termination).
608 */
609VMMR0DECL(void) SVMR0GlobalTerm(void)
610{
611 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
612 {
613 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
614 g_pvIOBitmap = NULL;
615 g_HCPhysIOBitmap = 0;
616 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
617 }
618
619#ifdef VBOX_WITH_NESTED_HWVIRT
620 if (g_hMemObjNstGstMsrBitmap != NIL_RTR0MEMOBJ)
621 {
622 RTR0MemObjFree(g_hMemObjNstGstMsrBitmap, true /* fFreeMappings */);
623 g_pvNstGstMsrBitmap = NULL;
624 g_HCPhysNstGstMsrBitmap = 0;
625 g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
626 }
627#endif
628}
629
630
631/**
632 * Frees any allocated per-VCPU structures for a VM.
633 *
634 * @param pVM The cross context VM structure.
635 */
636DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
637{
638 for (uint32_t i = 0; i < pVM->cCpus; i++)
639 {
640 PVMCPU pVCpu = &pVM->aCpus[i];
641 AssertPtr(pVCpu);
642
643 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
644 {
645 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
646 pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
647 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
648 }
649
650 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
651 {
652 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
653 pVCpu->hm.s.svm.pVmcb = NULL;
654 pVCpu->hm.s.svm.HCPhysVmcb = 0;
655 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
656 }
657
658 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
659 {
660 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
661 pVCpu->hm.s.svm.pvMsrBitmap = NULL;
662 pVCpu->hm.s.svm.HCPhysMsrBitmap = 0;
663 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
664 }
665 }
666}
667
668
669/**
670 * Does per-VM AMD-V initialization.
671 *
672 * @returns VBox status code.
673 * @param pVM The cross context VM structure.
674 */
675VMMR0DECL(int) SVMR0InitVM(PVM pVM)
676{
677 int rc = VERR_INTERNAL_ERROR_5;
678
679 /*
680 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
681 */
682 uint32_t u32Family;
683 uint32_t u32Model;
684 uint32_t u32Stepping;
685 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
686 {
687 Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
688 pVM->hm.s.svm.fAlwaysFlushTLB = true;
689 }
690
691 /*
692 * Initialize the R0 memory objects up-front so we can properly clean up on allocation failures.
693 */
694 for (VMCPUID i = 0; i < pVM->cCpus; i++)
695 {
696 PVMCPU pVCpu = &pVM->aCpus[i];
697 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
698 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
699 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
700 }
701
702 for (VMCPUID i = 0; i < pVM->cCpus; i++)
703 {
704 PVMCPU pVCpu = &pVM->aCpus[i];
705
706 /*
707 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
708 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
709 */
710 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
711 if (RT_FAILURE(rc))
712 goto failure_cleanup;
713
714 void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
715 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
716 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
717 ASMMemZeroPage(pvVmcbHost);
718
719 /*
720 * Allocate one page for the guest-state VMCB.
721 */
722 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
723 if (RT_FAILURE(rc))
724 goto failure_cleanup;
725
726 pVCpu->hm.s.svm.pVmcb = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
727 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
728 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
729 ASMMemZeroPage(pVCpu->hm.s.svm.pVmcb);
730
731 /*
732 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
733 * SVM to not require one.
734 */
735 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
736 false /* fExecutable */);
737 if (RT_FAILURE(rc))
738 goto failure_cleanup;
739
740 pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
741 pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
742 /* Set all bits to intercept all MSR accesses (changed later on). */
743 ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
744 }
745
746 return VINF_SUCCESS;
747
748failure_cleanup:
749 hmR0SvmFreeStructs(pVM);
750 return rc;
751}
752
753
754/**
755 * Does per-VM AMD-V termination.
756 *
757 * @returns VBox status code.
758 * @param pVM The cross context VM structure.
759 */
760VMMR0DECL(int) SVMR0TermVM(PVM pVM)
761{
762 hmR0SvmFreeStructs(pVM);
763 return VINF_SUCCESS;
764}
765
766
767/**
768 * Returns whether the VMCB Clean Bits feature is supported.
769 *
770 * @return @c true if supported, @c false otherwise.
771 * @param pVCpu The cross context virtual CPU structure.
772 * @param pCtx Pointer to the guest-CPU context.
773 */
774DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PCPUMCTX pCtx)
775{
776 PVM pVM = pVCpu->CTX_SUFF(pVM);
777#ifdef VBOX_WITH_NESTED_HWVIRT
778 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
779 {
780 return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
781 && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
782 }
783#else
784 RT_NOREF(pCtx);
785#endif
786 return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
787}
788
789
790/**
791 * Returns whether the decode assists feature is supported.
792 *
793 * @return @c true if supported, @c false otherwise.
794 * @param pVCpu The cross context virtual CPU structure.
795 * @param pCtx Pointer to the guest-CPU context.
796 */
797DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPU pVCpu, PCPUMCTX pCtx)
798{
799 PVM pVM = pVCpu->CTX_SUFF(pVM);
800#ifdef VBOX_WITH_NESTED_HWVIRT
801 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
802 {
803 return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
804 && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
805 }
806#else
807 RT_NOREF(pCtx);
808#endif
809 return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
810}
811
812
813/**
814 * Returns whether the NRIP_SAVE feature is supported.
815 *
816 * @return @c true if supported, @c false otherwise.
817 * @param pVCpu The cross context virtual CPU structure.
818 * @param pCtx Pointer to the guest-CPU context.
819 */
820DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu, PCPUMCTX pCtx)
821{
822 PVM pVM = pVCpu->CTX_SUFF(pVM);
823#ifdef VBOX_WITH_NESTED_HWVIRT
824 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
825 {
826 return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
827 && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
828 }
829#else
830 RT_NOREF(pCtx);
831#endif
832 return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
833}
834
835
836/**
837 * Sets the permission bits for the specified MSR in the MSRPM.
838 *
839 * @param pVmcb Pointer to the VM control block.
840 * @param pbMsrBitmap Pointer to the MSR bitmap.
841 * @param uMsr The MSR for which the access permissions are being set.
842 * @param enmRead MSR read permissions.
843 * @param enmWrite MSR write permissions.
844 */
845static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
846 SVMMSREXITWRITE enmWrite)
847{
848 uint16_t offMsrpm;
849 uint32_t uMsrpmBit;
850 int rc = HMSvmGetMsrpmOffsetAndBit(uMsr, &offMsrpm, &uMsrpmBit);
851 AssertRC(rc);
852
853 Assert(uMsrpmBit < 0x3fff);
854 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
855
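 /* Each MSR is covered by two adjacent bits in the MSRPM: the lower bit intercepts RDMSR
    and the bit right above it intercepts WRMSR, hence uMsrpmBit and uMsrpmBit + 1 below.
    HMSvmGetMsrpmOffsetAndBit() maps the MSR index to the byte offset and read-bit position. */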
856 pbMsrBitmap += offMsrpm;
857 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
858 ASMBitSet(pbMsrBitmap, uMsrpmBit);
859 else
860 ASMBitClear(pbMsrBitmap, uMsrpmBit);
861
862 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
863 ASMBitSet(pbMsrBitmap, uMsrpmBit + 1);
864 else
865 ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
866
867 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
868}
869
870
871/**
872 * Sets up AMD-V for the specified VM.
873 * This function is only called once per VM during initialization.
874 *
875 * @returns VBox status code.
876 * @param pVM The cross context VM structure.
877 */
878VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
879{
880 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
881 AssertReturn(pVM, VERR_INVALID_PARAMETER);
882 Assert(pVM->hm.s.svm.fSupported);
883
884 bool const fPauseFilter = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
885 bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
886 bool const fUsePauseFilter = fPauseFilter && pVM->hm.s.svm.cPauseFilter && pVM->hm.s.svm.cPauseFilterThresholdTicks;
887
888 bool const fLbrVirt = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
889 bool const fUseLbrVirt = fLbrVirt; /** @todo CFGM, IEM implementation etc. */
890
891#ifdef VBOX_WITH_NESTED_HWVIRT
892 bool const fVirtVmsaveVmload = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
893 bool const fUseVirtVmsaveVmload = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && pVM->hm.s.fNestedPaging;
894
895 bool const fVGif = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
896 bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
897#endif
898
899 for (VMCPUID i = 0; i < pVM->cCpus; i++)
900 {
901 PVMCPU pVCpu = &pVM->aCpus[i];
902 PSVMVMCB pVmcb = pVM->aCpus[i].hm.s.svm.pVmcb;
903
904 AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
905
906 /* Initialize the #VMEXIT history array with end-of-array markers (UINT16_MAX). */
907 Assert(!pVCpu->hm.s.idxExitHistoryFree);
908 HMCPU_EXIT_HISTORY_RESET(pVCpu);
909
910 /* Always trap #AC for reasons of security. */
911 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
912
913 /* Always trap #DB for reasons of security. */
914 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
915
916 /* Trap exceptions unconditionally (debug purposes). */
917#ifdef HMSVM_ALWAYS_TRAP_PF
918 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
919#endif
920#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
921 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
922 pVmcb->ctrl.u32InterceptXcpt |= 0
923 | RT_BIT(X86_XCPT_BP)
924 | RT_BIT(X86_XCPT_DE)
925 | RT_BIT(X86_XCPT_NM)
926 | RT_BIT(X86_XCPT_UD)
927 | RT_BIT(X86_XCPT_NP)
928 | RT_BIT(X86_XCPT_SS)
929 | RT_BIT(X86_XCPT_GP)
930 | RT_BIT(X86_XCPT_PF)
931 | RT_BIT(X86_XCPT_MF)
932 ;
933#endif
934
935 /* Set up unconditional intercepts and conditions. */
936 pVmcb->ctrl.u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
937 | SVM_CTRL_INTERCEPT_VMMCALL;
938
939 /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
940 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
941
942 /* CR0, CR4 writes must be intercepted for the same reasons as above. */
943 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
944
945 /* Intercept all DRx reads and writes by default. Changed later on. */
946 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
947 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
948
949 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
950 pVmcb->ctrl.IntCtrl.n.u1VIntrMasking = 1;
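 /* Per the AMD spec, with V_INTR_MASKING set the guest's EFLAGS.IF and TPR gate only
    virtual interrupts; physical interrupts remain controlled by the host EFLAGS.IF, so the
    host can still be interrupted while guest code runs with interrupts "disabled". */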
951
952 /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
953 and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
954 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;
955
956 /* Set IO and MSR bitmap permission bitmap physical addresses. */
957 pVmcb->ctrl.u64IOPMPhysAddr = g_HCPhysIOBitmap;
958 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
959
960 /* LBR virtualization. */
961 if (fUseLbrVirt)
962 {
963 pVmcb->ctrl.LbrVirt.n.u1LbrVirt = fUseLbrVirt;
964 pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
965 }
966 else
967 Assert(pVmcb->ctrl.LbrVirt.n.u1LbrVirt == 0);
968
969#ifdef VBOX_WITH_NESTED_HWVIRT
970 /* Virtualized VMSAVE/VMLOAD. */
971 pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload = fUseVirtVmsaveVmload;
972 if (!fUseVirtVmsaveVmload)
973 {
974 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
975 | SVM_CTRL_INTERCEPT_VMLOAD;
976 }
977
978 /* Virtual GIF. */
979 pVmcb->ctrl.IntCtrl.n.u1VGifEnable = fUseVGif;
980 if (!fUseVGif)
981 {
982 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
983 | SVM_CTRL_INTERCEPT_STGI;
984 }
985#endif
986
987 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
988 Assert(pVmcb->ctrl.u32VmcbCleanBits == 0);
989
990 /* The host ASID is MBZ (ASID 0 is reserved for the host); start the guest with ASID 1. */
991 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
992
993 /*
994 * Setup the PAT MSR (applicable for Nested Paging only).
995 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
996 * so choose type 6 for all PAT slots.
997 */
998 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
999
1000 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
1001 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
1002
1003 /* Without Nested Paging, we need additional intercepts. */
1004 if (!pVM->hm.s.fNestedPaging)
1005 {
1006 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
1007 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
1008 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
1009
1010 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
1011 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
1012 | SVM_CTRL_INTERCEPT_TASK_SWITCH;
1013
1014 /* Page faults must be intercepted to implement shadow paging. */
1015 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
1016 }
1017
1018#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
1019 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
1020#endif
1021
1022 /* Apply the exception intercepts needed by the GIM provider. */
1023 if (pVCpu->hm.s.fGIMTrapXcptUD)
1024 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
1025
1026 /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
1027 if (fUsePauseFilter)
1028 {
1029 pVmcb->ctrl.u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
1030 if (fPauseFilterThreshold)
1031 pVmcb->ctrl.u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
1032 }
1033
1034 /*
1035 * The following MSRs are saved/restored automatically during the world-switch.
1036 * Don't intercept guest read/write accesses to these MSRs.
1037 */
1038 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
1039 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1040 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1041 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1042 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1043 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1044 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1045 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1046 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1047 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1048 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1049 }
1050
1051 return VINF_SUCCESS;
1052}
1053
1054
1055/**
1056 * Gets a pointer to the currently active guest or nested-guest VMCB.
1057 *
1058 * @returns Pointer to the current context VMCB.
1059 * @param pVCpu The cross context virtual CPU structure.
1060 * @param pCtx Pointer to the guest-CPU context.
1061 */
1062DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
1063{
1064#ifdef VBOX_WITH_NESTED_HWVIRT
1065 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1066 return pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1067#else
1068 RT_NOREF(pCtx);
1069#endif
1070 return pVCpu->hm.s.svm.pVmcb;
1071}
1072
1073
1074/**
1075 * Invalidates a guest page by guest virtual address.
1076 *
1077 * @returns VBox status code.
1078 * @param pVM The cross context VM structure.
1079 * @param pVCpu The cross context virtual CPU structure.
1080 * @param GCVirt Guest virtual address of the page to invalidate.
1081 */
1082VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1083{
1084 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1085 Assert(pVM->hm.s.svm.fSupported);
1086
1087 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1088
1089 /* Skip it if a TLB flush is already pending. */
1090 if (!fFlushPending)
1091 {
1092 Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
1093
1094 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1095 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
1096 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
1097
1098#if HC_ARCH_BITS == 32
1099 /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
1100 if (CPUMIsGuestInLongMode(pVCpu))
1101 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1102 else
1103#endif
1104 {
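 /* INVLPGA invalidates the TLB mapping of the given virtual address for the guest's
    current ASID only; translations belonging to other ASIDs on this host CPU are kept. */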
1105 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
1106 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1107 }
1108 }
1109 return VINF_SUCCESS;
1110}
1111
1112
1113/**
1114 * Flushes the appropriate tagged-TLB entries.
1115 *
1116 * @param pVCpu The cross context virtual CPU structure.
1117 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context.
1118 * @param pVmcb Pointer to the VM control block.
1119 */
1120static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
1121{
1122#ifndef VBOX_WITH_NESTED_HWVIRT
1123 RT_NOREF(pCtx);
1124#endif
1125
1126 PVM pVM = pVCpu->CTX_SUFF(pVM);
1127 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
1128
1129 /*
1130 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1131 * This can happen both for start & resume due to long jumps back to ring-3.
1132 *
1133 * We also force a TLB flush every time we execute a nested-guest VCPU, as there is no correlation
1134 * between it and the physical CPU.
1135 *
1136 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
1137 * so we cannot reuse the ASIDs without flushing.
1138 */
1139 bool fNewAsid = false;
1140 Assert(pCpu->idCpu != NIL_RTCPUID);
1141 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1142 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes
1143#ifdef VBOX_WITH_NESTED_HWVIRT
1144 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
1145#endif
1146 )
1147 {
1148 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1149 pVCpu->hm.s.fForceTLBFlush = true;
1150 fNewAsid = true;
1151 }
1152
1153 /* Set TLB flush state as checked until we return from the world switch. */
1154 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
1155
1156 /* Check for explicit TLB flushes. */
1157 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1158 {
1159 pVCpu->hm.s.fForceTLBFlush = true;
1160 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1161 }
1162
1163 /*
1164 * If the CPU is affected by AMD erratum 170, we need to flush the entire TLB for each world switch. Sad.
1165 * This host CPU requirement takes precedence.
1166 */
1167 if (pVM->hm.s.svm.fAlwaysFlushTLB)
1168 {
1169 pCpu->uCurrentAsid = 1;
1170 pVCpu->hm.s.uCurrentAsid = 1;
1171 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1172 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1173
1174 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1175 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1176
1177 /* Keep track of last CPU ID even when flushing all the time. */
1178 if (fNewAsid)
1179 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1180 }
1181 else
1182 {
1183 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
1184 if (pVCpu->hm.s.fForceTLBFlush)
1185 {
1186 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1187 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1188
1189 if (fNewAsid)
1190 {
1191 ++pCpu->uCurrentAsid;
1192
1193 bool fHitASIDLimit = false;
1194 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1195 {
1196 pCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
1197 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
1198 fHitASIDLimit = true;
1199 }
1200
1201 if ( fHitASIDLimit
1202 || pCpu->fFlushAsidBeforeUse)
1203 {
1204 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1205 pCpu->fFlushAsidBeforeUse = false;
1206 }
1207
1208 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1209 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1210 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1211 }
1212 else
1213 {
1214 if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
1215 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
1216 else
1217 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1218 }
1219
1220 pVCpu->hm.s.fForceTLBFlush = false;
1221 }
1222 }
1223
1224 /* Update VMCB with the ASID. */
1225 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
1226 {
1227 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
1228 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
1229 }
1230
1231 AssertMsg(pVCpu->hm.s.idLastCpu == pCpu->idCpu,
1232 ("vcpu idLastCpu=%u pcpu idCpu=%u\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
1233 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1234 ("Flush count mismatch for cpu %u (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1235 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1236 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
1237 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1238 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1239
1240#ifdef VBOX_WITH_STATISTICS
1241 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
1242 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1243 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
1244 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
1245 {
1246 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1247 }
1248 else
1249 {
1250 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1251 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1252 }
1253#endif
1254}
1255
1256
1257/** @name 64-bit guest on 32-bit host OS helper functions.
1258 *
1259 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
1260 * mode (code segment, paging). These wrappers/helpers perform the necessary
1261 * bits for the 32->64 switcher.
1262 *
1263 * @{ */
1264#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1265/**
1266 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
1267 *
1268 * @returns VBox status code.
1269 * @param HCPhysVmcbHost Physical address of host VMCB.
1270 * @param HCPhysVmcb Physical address of the VMCB.
1271 * @param pCtx Pointer to the guest-CPU context.
1272 * @param pVM The cross context VM structure.
1273 * @param pVCpu The cross context virtual CPU structure.
1274 */
1275DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
1276{
1277 uint32_t aParam[8];
1278 aParam[0] = RT_LO_U32(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
1279 aParam[1] = RT_HI_U32(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Hi. */
1280 aParam[2] = RT_LO_U32(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
1281 aParam[3] = RT_HI_U32(HCPhysVmcb); /* Param 2: HCPhysVmcb - Hi. */
1282 aParam[4] = VM_RC_ADDR(pVM, pVM);
1283 aParam[5] = 0;
1284 aParam[6] = VM_RC_ADDR(pVM, pVCpu);
1285 aParam[7] = 0;
1286
1287 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]);
1288}
1289
1290
1291/**
1292 * Executes the specified VMRUN handler in 64-bit mode.
1293 *
1294 * @returns VBox status code.
1295 * @param pVM The cross context VM structure.
1296 * @param pVCpu The cross context virtual CPU structure.
1297 * @param pCtx Pointer to the guest-CPU context.
1298 * @param enmOp The operation to perform.
1299 * @param cParams Number of parameters.
1300 * @param paParam Array of 32-bit parameters.
1301 */
1302VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
1303 uint32_t cParams, uint32_t *paParam)
1304{
1305 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
1306 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
1307
1308 NOREF(pCtx);
1309
1310 /* Disable interrupts. */
1311 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
1312
1313#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1314 RTCPUID idHostCpu = RTMpCpuId();
1315 CPUMR0SetLApic(pVCpu, idHostCpu);
1316#endif
1317
1318 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
1319 CPUMSetHyperEIP(pVCpu, enmOp);
1320 for (int i = (int)cParams - 1; i >= 0; i--)
1321 CPUMPushHyper(pVCpu, paParam[i]);
1322
1323 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
1324 /* Call the switcher. */
1325 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
1326 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
1327
1328 /* Restore interrupts. */
1329 ASMSetFlags(uOldEFlags);
1330 return rc;
1331}
1332
1333#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
1334/** @} */
1335
1336
1337/**
1338 * Adds an exception to the intercept exception bitmap in the VMCB and updates
1339 * the corresponding VMCB Clean bit.
1340 *
1341 * @param pVmcb Pointer to the VM control block.
1342 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1343 */
1344DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1345{
1346 if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(u32Xcpt)))
1347 {
1348 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(u32Xcpt);
1349 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1350 }
1351}
1352
1353
1354/**
1355 * Removes an exception from the intercept-exception bitmap in the VMCB and
1356 * updates the corresponding VMCB Clean bit.
1357 *
1358 * @param pVCpu The cross context virtual CPU structure.
1359 * @param pCtx Pointer to the guest-CPU context.
1360 * @param pVmcb Pointer to the VM control block.
1361 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1362 *
1363 * @remarks This takes into account if we're executing a nested-guest and only
1364 * removes the exception intercept if both the guest -and- nested-guest
1365 * are not intercepting it.
1366 */
1367DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, uint32_t u32Xcpt)
1368{
1369 Assert(u32Xcpt != X86_XCPT_DB);
1370 Assert(u32Xcpt != X86_XCPT_AC);
1371#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1372 if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(u32Xcpt))
1373 {
1374 bool fRemoveXcpt = true;
1375#ifdef VBOX_WITH_NESTED_HWVIRT
1376 /* Only remove the intercept if the nested-guest is also not intercepting it! */
1377 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1378 {
1379 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
1380 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
1381 fRemoveXcpt = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(u32Xcpt));
1382 }
1383#else
1384 RT_NOREF2(pVCpu, pCtx);
1385#endif
1386 if (fRemoveXcpt)
1387 {
1388 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(u32Xcpt);
1389 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1390 }
1391 }
1392#else
1393 RT_NOREF3(pVCpu, pCtx, pVmcb);
1394#endif
1395}
1396
1397
1398/**
1399 * Loads the guest (or nested-guest) CR0 control register into the guest-state
1400 * area in the VMCB.
1401 *
1402 * Although the guest CR0 is a separate field in the VMCB, we have to consider
1403 * the FPU state itself, which is shared between the host and the guest.
1404 *
1405 * @returns VBox status code.
1406 * @param pVCpu The cross context virtual CPU structure.
1407 * @param pVmcb Pointer to the VM control block.
1408 * @param pCtx Pointer to the guest-CPU context.
1409 *
1410 * @remarks No-long-jump zone!!!
1411 */
1412static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1413{
1414 uint64_t u64GuestCR0 = pCtx->cr0;
1415
1416 /* Always enable caching. */
1417 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
1418
1419 /*
1420 * When Nested Paging is not available, use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
1421 */
1422 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
1423 {
1424 u64GuestCR0 |= X86_CR0_PG; /* When Nested Paging is not available, use shadow page tables. */
1425 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1426 }
1427
1428 /*
1429 * Guest FPU bits.
1430 */
1431 bool fInterceptNM = false;
1432 bool fInterceptMF = false;
1433 u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
1434 if (CPUMIsGuestFPUStateActive(pVCpu))
1435 {
1436 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1437 if (!(pCtx->cr0 & X86_CR0_NE))
1438 {
1439 Log4(("hmR0SvmLoadSharedCR0: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
1440 fInterceptMF = true;
1441 }
1442 }
1443 else
1444 {
1445 fInterceptNM = true; /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
1446 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
1447 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
1448 }
1449
1450 /*
1451 * Update the exception intercept bitmap.
1452 */
1453 if (fInterceptNM)
1454 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
1455 else
1456 hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_NM);
1457
1458 if (fInterceptMF)
1459 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
1460 else
1461 hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_MF);
1462
1463 pVmcb->guest.u64CR0 = u64GuestCR0;
1464 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1465}
1466
1467
1468/**
1469 * Loads the guest/nested-guest control registers (CR2, CR3, CR4) into the VMCB.
1470 *
1471 * @returns VBox status code.
1472 * @param pVCpu The cross context virtual CPU structure.
1473 * @param pVmcb Pointer to the VM control block.
1474 * @param pCtx Pointer to the guest-CPU context.
1475 *
1476 * @remarks No-long-jump zone!!!
1477 */
1478static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1479{
1480 PVM pVM = pVCpu->CTX_SUFF(pVM);
1481
1482 /*
1483 * Guest CR2.
1484 */
1485 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
1486 {
1487 pVmcb->guest.u64CR2 = pCtx->cr2;
1488 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1489 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
1490 }
1491
1492 /*
1493 * Guest CR3.
1494 */
1495 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
1496 {
1497 if (pVM->hm.s.fNestedPaging)
1498 {
1499 PGMMODE enmShwPagingMode;
1500#if HC_ARCH_BITS == 32
1501 if (CPUMIsGuestInLongModeEx(pCtx))
1502 enmShwPagingMode = PGMMODE_AMD64_NX;
1503 else
1504#endif
1505 enmShwPagingMode = PGMGetHostMode(pVM);
1506
1507 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
1508 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1509 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1510 pVmcb->guest.u64CR3 = pCtx->cr3;
1511 }
1512 else
1513 {
1514 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1515 Log4(("hmR0SvmLoadGuestControlRegs: CR3=%#RX64 (HyperCR3=%#RX64)\n", pCtx->cr3, pVmcb->guest.u64CR3));
1516 }
1517
1518 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1519 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
1520 }
1521
1522 /*
1523 * Guest CR4.
1524 * ASSUMES this is done every time we get in from ring-3! (XCR0)
1525 */
1526 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
1527 {
1528 uint64_t u64GuestCR4 = pCtx->cr4;
1529 Assert(RT_HI_U32(u64GuestCR4) == 0);
1530 if (!pVM->hm.s.fNestedPaging)
1531 {
1532 switch (pVCpu->hm.s.enmShadowMode)
1533 {
1534 case PGMMODE_REAL:
1535 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1536 AssertFailed();
1537 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1538
1539 case PGMMODE_32_BIT: /* 32-bit paging. */
1540 u64GuestCR4 &= ~X86_CR4_PAE;
1541 break;
1542
1543 case PGMMODE_PAE: /* PAE paging. */
1544 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1545 /* Must use PAE paging as we could use physical memory > 4 GB. */
1546 u64GuestCR4 |= X86_CR4_PAE;
1547 break;
1548
1549 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1550 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1551#ifdef VBOX_ENABLE_64_BITS_GUESTS
1552 break;
1553#else
1554 AssertFailed();
1555 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1556#endif
1557
1558 default: /* shut up gcc */
1559 AssertFailed();
1560 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1561 }
1562 }
1563
1564 pVmcb->guest.u64CR4 = u64GuestCR4;
1565 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1566
1567 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1568 pVCpu->hm.s.fLoadSaveGuestXcr0 = (u64GuestCR4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1569
1570 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
1571 }
1572
1573 return VINF_SUCCESS;
1574}
1575
1576
1577/**
1578 * Loads the guest (or nested-guest) segment registers into the VMCB.
1579 *
1580 * @returns VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure.
1582 * @param pVmcb Pointer to the VM control block.
1583 * @param pCtx Pointer to the guest-CPU context.
1584 *
1585 * @remarks No-long-jump zone!!!
1586 */
1587static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1588{
1589 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
1590 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
1591 {
1592 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);
1593 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
1594 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);
1595 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);
1596 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
1597 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
1598
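/* AMD-V keeps the CPL in a dedicated VMCB field rather than deriving it from SS; seed it from
   SS.DPL here, the reverse fixup on #VMEXIT is done in hmR0SvmSaveGuestState(). */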
1599 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1600 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1601 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
1602 }
1603
1604 /* Guest TR. */
1605 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
1606 {
1607 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
1608 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
1609 }
1610
1611 /* Guest LDTR. */
1612 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
1613 {
1614 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
1615 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
1616 }
1617
1618 /* Guest GDTR. */
1619 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
1620 {
1621 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1622 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1623 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1624 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
1625 }
1626
1627 /* Guest IDTR. */
1628 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
1629 {
1630 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1631 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1632 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1633 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
1634 }
1635}
1636
1637
1638/**
1639 * Loads the guest (or nested-guest) MSRs into the VMCB.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param pVmcb Pointer to the VM control block.
1643 * @param pCtx Pointer to the guest-CPU context.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1648{
1649 /* Guest Sysenter MSRs. */
1650 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1651 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1652 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1653
1654 /*
1655 * Guest EFER MSR.
1656 * AMD-V requires guest EFER.SVME to be set. Weird.
1657 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1658 */
1659 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
1660 {
1661 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1662 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1663 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
1664 }
1665
1666 /* 64-bit MSRs. */
1667 if (CPUMIsGuestInLongModeEx(pCtx))
1668 {
1669 /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */
1670 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1671 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1672 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1673 }
1674 else
1675 {
1676 /* If the guest isn't in 64-bit mode, clear the MSR_K6_LME bit from guest EFER, otherwise AMD-V expects amd64 shadow paging. */
1677 if (pCtx->msrEFER & MSR_K6_EFER_LME)
1678 {
1679 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1680 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1681 }
1682 }
1683
1684 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
1685 * be writable in 32-bit mode. Clarify with AMD spec. */
1686 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1687 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1688 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1689 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1690 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1691}
1692
1693
1694/**
1695 * Loads the guest (or nested-guest) debug state into the VMCB and programs the
1696 * necessary intercepts accordingly.
1697 *
1698 * @param pVCpu The cross context virtual CPU structure.
1699 * @param pVmcb Pointer to the VM control block.
1700 * @param pCtx Pointer to the guest-CPU context.
1701 *
1702 * @remarks No-long-jump zone!!!
1703 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1704 */
1705static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1706{
1707 bool fInterceptMovDRx = false;
1708
1709 /*
1710 * Anyone single stepping on the host side? If so, we'll have to use the
1711 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1712 * the VMM level like the VT-x implementation does.
1713 */
1714 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1715 if (fStepping)
1716 {
1717 pVCpu->hm.s.fClearTrapFlag = true;
1718 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1719 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1720 }
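/* The trap flag set above makes the CPU raise a #DB after every guest instruction; #DB is always
   intercepted, so we regain control, and fClearTrapFlag lets us strip the injected TF again
   before the guest can observe it. */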
1721
1722 if ( fStepping
1723 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1724 {
1725 /*
1726 * Use the combined guest and host DRx values found in the hypervisor
1727 * register set because the debugger has breakpoints active or someone
1728 * is single stepping on the host side.
1729 *
1730 * Note! DBGF expects a clean DR6 state before executing guest code.
1731 */
1732#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1733 if ( CPUMIsGuestInLongModeEx(pCtx)
1734 && !CPUMIsHyperDebugStateActivePending(pVCpu))
1735 {
1736 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1737 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
1738 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
1739 }
1740 else
1741#endif
1742 if (!CPUMIsHyperDebugStateActive(pVCpu))
1743 {
1744 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1745 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1746 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1747 }
1748
1749 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1750 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1751 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1752 {
1753 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1754 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1755 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1756 pVCpu->hm.s.fUsingHyperDR7 = true;
1757 }
1758
1759 /** @todo If we cared, we could optimize to allow the guest to read registers
1760 * with the same values. */
1761 fInterceptMovDRx = true;
1762 Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
1763 }
1764 else
1765 {
1766 /*
1767 * Update DR6, DR7 with the guest values if necessary.
1768 */
1769 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1770 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1771 {
1772 pVmcb->guest.u64DR7 = pCtx->dr[7];
1773 pVmcb->guest.u64DR6 = pCtx->dr[6];
1774 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1775 pVCpu->hm.s.fUsingHyperDR7 = false;
1776 }
1777
1778 /*
1779 * If the guest has enabled debug registers, we need to load them prior to
1780 * executing guest code so they'll trigger at the right time.
1781 */
1782 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1783 {
1784#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1785 if ( CPUMIsGuestInLongModeEx(pCtx)
1786 && !CPUMIsGuestDebugStateActivePending(pVCpu))
1787 {
1788 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1789 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1790 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
1791 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
1792 }
1793 else
1794#endif
1795 if (!CPUMIsGuestDebugStateActive(pVCpu))
1796 {
1797 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1798 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1799 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1800 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1801 }
1802 Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
1803 }
1804 /*
1805 * If no debugging enabled, we'll lazy load DR0-3. We don't need to
1806 * intercept #DB as DR6 is updated in the VMCB.
1807 *
1808 * Note! If we cared and dared, we could skip intercepting \#DB here.
1809 * However, \#DB shouldn't be performance critical, so we'll play safe
1810 * and keep the code similar to the VT-x code and always intercept it.
1811 */
1812#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1813 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
1814 && !CPUMIsGuestDebugStateActive(pVCpu))
1815#else
1816 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1817#endif
1818 {
1819 fInterceptMovDRx = true;
1820 }
1821 }
1822
1823 Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
1824 if (fInterceptMovDRx)
1825 {
1826 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1827 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1828 {
1829 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
1830 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
1831 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1832 }
1833 }
1834 else
1835 {
1836 if ( pVmcb->ctrl.u16InterceptRdDRx
1837 || pVmcb->ctrl.u16InterceptWrDRx)
1838 {
1839 pVmcb->ctrl.u16InterceptRdDRx = 0;
1840 pVmcb->ctrl.u16InterceptWrDRx = 0;
1841 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1842 }
1843 }
1844 Log4(("hmR0SvmLoadSharedDebugState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
1845}
1846
1847
1848#ifdef VBOX_WITH_NESTED_HWVIRT
1849/**
1850 * Loads the nested-guest APIC state (currently just the TPR).
1851 *
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcbNstGst Pointer to the nested-guest VM control block.
1854 */
1855static void hmR0SvmLoadGuestApicStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
1856{
1857 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
1858 {
1859 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
1860 pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1;
1861 pVCpu->hm.s.svm.fSyncVTpr = false;
1862 pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;
1863
1864 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
1865 }
1866}
1867#endif
1868
1869/**
1870 * Loads the guest APIC state (currently just the TPR).
1871 *
1872 * @returns VBox status code.
1873 * @param pVCpu The cross context virtual CPU structure.
1874 * @param pVmcb Pointer to the VM control block.
1875 * @param pCtx Pointer to the guest-CPU context.
1876 */
1877static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1878{
1879 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
1880 return VINF_SUCCESS;
1881
1882 int rc = VINF_SUCCESS;
1883 PVM pVM = pVCpu->CTX_SUFF(pVM);
1884 if ( PDMHasApic(pVM)
1885 && APICIsEnabled(pVCpu))
1886 {
1887 bool fPendingIntr;
1888 uint8_t u8Tpr;
1889 rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
1890 AssertRCReturn(rc, rc);
1891
1892 /* Assume that we need to trap all TPR accesses and thus need not check on
1893 every #VMEXIT if we should update the TPR. */
1894 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
1895 pVCpu->hm.s.svm.fSyncVTpr = false;
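/* fSyncVTpr is only set below when TPR writes are left unintercepted; in that case the TPR has to
   be read back (from the VTPR field or the patched LSTAR) and pushed to the virtual APIC on every
   #VMEXIT. */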
1896
1897 /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
1898 if (pVM->hm.s.fTPRPatchingActive)
1899 {
1900 pCtx->msrLSTAR = u8Tpr;
1901 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
1902
1903 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
1904 if (fPendingIntr)
1905 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
1906 else
1907 {
1908 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1909 pVCpu->hm.s.svm.fSyncVTpr = true;
1910 }
1911 }
1912 else
1913 {
1914 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
1915 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
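/* E.g. a guest TPR of 0x80 (task-priority class 8) is programmed as a VTPR value of 0x8. */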
1916
1917 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
1918 if (fPendingIntr)
1919 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
1920 else
1921 {
1922 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
1923 pVCpu->hm.s.svm.fSyncVTpr = true;
1924 }
1925
1926 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
1927 }
1928 }
1929
1930 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
1931 return rc;
1932}
1933
1934
1935/**
1936 * Loads the exception interrupts required for guest (or nested-guest) execution in
1937 * the VMCB.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure.
1940 * @param pVmcb Pointer to the VM control block.
1941 * @param pCtx Pointer to the guest-CPU context.
1942 */
1943static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1944{
1945 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
1946 {
1947 /* Trap #UD for GIM provider (e.g. for hypercalls). */
1948 if (pVCpu->hm.s.fGIMTrapXcptUD)
1949 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_UD);
1950 else
1951 hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_UD);
1952
1953 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
1954 if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
1955 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_BP);
1956 else
1957 hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_BP);
1958
1959 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmLoadSharedCR0(). */
1960 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
1961 }
1962}
1963
1964
1965#ifdef VBOX_WITH_NESTED_HWVIRT
1966/**
1967 * Loads the intercepts required for nested-guest execution in the VMCB.
1968 *
1969 * This merges the guest and nested-guest intercepts in a way that if the outer
1970 * guest intercepts an exception we need to intercept it in the nested-guest as
1971 * well and handle it accordingly.
1972 *
1973 * @param pVCpu The cross context virtual CPU structure.
1974 * @param pVmcbNstGst Pointer to the nested-guest VM control block.
1975 * @param pCtx Pointer to the guest-CPU context.
1976 */
1977static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
1978{
1979 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
1980 {
1981 /* First, load the guest exception intercepts into the guest VMCB. */
1982 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
1983 hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
1984
1985 /* Next, merge the intercepts into the nested-guest VMCB. */
1986 pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
1987 pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
1988
1989 /* Always intercept CR0, CR4 reads and writes as we alter them. */
1990 pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(0) | RT_BIT(4);
1991 pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(0) | RT_BIT(4);
1992
1993 /* Always intercept CR3 reads and writes without nested-paging as we load shadow page tables. */
1994 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
1995 {
1996 pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(3);
1997 pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(3);
1998 }
1999
2000 /** @todo Figure out debugging with nested-guests, till then just intercept
2001 * all DR[0-15] accesses. */
2002 pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
2003 pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
2004
2005 /*
2006 * Adjust intercepts while executing the nested-guest that differ from the
2007 * outer guest intercepts.
2008 *
2009 * - VINTR: Exclude the outer guest intercept as we don't want to deliver VINTR #VMEXITs
2010 * that belong to the nested-guest to the outer guest.
2011 *
2012 * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
2013 * the nested-guest, the physical CPU raises a \#UD exception as expected.
2014 */
2015 pVmcbNstGst->ctrl.u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
2016 pVmcbNstGst->ctrl.u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~( SVM_CTRL_INTERCEPT_VINTR
2017 | SVM_CTRL_INTERCEPT_VMMCALL))
2018 | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
2019
2020 Assert( (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
2021 == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
2022
2023 /*
2024 * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
2025 * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
2026 */
2027 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
2028 {
2029 pVmcbNstGst->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
2030 | SVM_CTRL_INTERCEPT_VMLOAD;
2031 }
2032
2033 /*
2034 * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
2035 * CLGI/STGI instructions executed by the nested-guest.
2036 */
2037 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
2038 {
2039 pVmcbNstGst->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
2040 | SVM_CTRL_INTERCEPT_STGI;
2041 }
2042
2043 /* Finally, update the VMCB clean bits. */
2044 pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2045
2046 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
2047 }
2048}
2049#endif
2050
2051
2052/**
2053 * Sets up the appropriate function to run guest code.
2054 *
2055 * @returns VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure.
2057 *
2058 * @remarks No-long-jump zone!!!
2059 */
2060static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu)
2061{
2062 if (CPUMIsGuestInLongMode(pVCpu))
2063 {
2064#ifndef VBOX_ENABLE_64_BITS_GUESTS
2065 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2066#endif
2067 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
2068#if HC_ARCH_BITS == 32
2069 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
2070 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
2071#else
2072 /* 64-bit host or hybrid host. */
2073 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
2074#endif
2075 }
2076 else
2077 {
2078 /* Guest is not in long mode, use the 32-bit handler. */
2079 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
2080 }
2081 return VINF_SUCCESS;
2082}
2083
2084
2085/**
2086 * Enters the AMD-V session.
2087 *
2088 * @returns VBox status code.
2089 * @param pVM The cross context VM structure.
2090 * @param pVCpu The cross context virtual CPU structure.
2091 * @param pCpu Pointer to the CPU info struct.
2092 */
2093VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2094{
2095 AssertPtr(pVM);
2096 AssertPtr(pVCpu);
2097 Assert(pVM->hm.s.svm.fSupported);
2098 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2099 NOREF(pVM); NOREF(pCpu);
2100
2101 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2102 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
2103
2104 pVCpu->hm.s.fLeaveDone = false;
2105 return VINF_SUCCESS;
2106}
2107
2108
2109/**
2110 * Thread-context callback for AMD-V.
2111 *
2112 * @param enmEvent The thread-context event.
2113 * @param pVCpu The cross context virtual CPU structure.
2114 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
2115 * @thread EMT(pVCpu)
2116 */
2117VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
2118{
2119 NOREF(fGlobalInit);
2120
2121 switch (enmEvent)
2122 {
2123 case RTTHREADCTXEVENT_OUT:
2124 {
2125 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2126 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
2127 VMCPU_ASSERT_EMT(pVCpu);
2128
2129 /* No longjmps (log-flush, locks) in this fragile context. */
2130 VMMRZCallRing3Disable(pVCpu);
2131
2132 if (!pVCpu->hm.s.fLeaveDone)
2133 {
2134 hmR0SvmLeave(pVCpu);
2135 pVCpu->hm.s.fLeaveDone = true;
2136 }
2137
2138 /* Leave HM context, takes care of local init (term). */
2139 int rc = HMR0LeaveCpu(pVCpu);
2140 AssertRC(rc); NOREF(rc);
2141
2142 /* Restore longjmp state. */
2143 VMMRZCallRing3Enable(pVCpu);
2144 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
2145 break;
2146 }
2147
2148 case RTTHREADCTXEVENT_IN:
2149 {
2150 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2151 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
2152 VMCPU_ASSERT_EMT(pVCpu);
2153
2154 /* No longjmps (log-flush, locks) in this fragile context. */
2155 VMMRZCallRing3Disable(pVCpu);
2156
2157 /*
2158 * Initialize the bare minimum state required for HM. This takes care of
2159 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
2160 */
2161 int rc = HMR0EnterCpu(pVCpu);
2162 AssertRC(rc); NOREF(rc);
2163 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
2164
2165 pVCpu->hm.s.fLeaveDone = false;
2166
2167 /* Restore longjmp state. */
2168 VMMRZCallRing3Enable(pVCpu);
2169 break;
2170 }
2171
2172 default:
2173 break;
2174 }
2175}
2176
2177
2178/**
2179 * Saves the host state.
2180 *
2181 * @returns VBox status code.
2182 * @param pVM The cross context VM structure.
2183 * @param pVCpu The cross context virtual CPU structure.
2184 *
2185 * @remarks No-long-jump zone!!!
2186 */
2187VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
2188{
2189 NOREF(pVM);
2190 NOREF(pVCpu);
2191 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
2192 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
2193 return VINF_SUCCESS;
2194}
2195
2196
2197/**
2198 * Loads the guest state into the VMCB.
2199 *
2200 * The CPU state will be loaded from these fields on every successful VM-entry.
2201 * Also sets up the appropriate VMRUN function to execute guest code based on
2202 * the guest CPU mode.
2203 *
2204 * @returns VBox status code.
2205 * @param pVM The cross context VM structure.
2206 * @param pVCpu The cross context virtual CPU structure.
2207 * @param pCtx Pointer to the guest-CPU context.
2208 *
2209 * @remarks No-long-jump zone!!!
2210 */
2211static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2212{
2213 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
2214
2215 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
2216 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
2217
2218 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
2219
2220 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
2221 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
2222
2223 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
2224 hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
2225
2226 pVmcb->guest.u64RIP = pCtx->rip;
2227 pVmcb->guest.u64RSP = pCtx->rsp;
2228 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
2229 pVmcb->guest.u64RAX = pCtx->rax;
2230
2231#ifdef VBOX_WITH_NESTED_HWVIRT
2232 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable == 1)
2233 {
2234 Assert(pVM->hm.s.svm.fVGif);
2235 pVmcb->ctrl.IntCtrl.n.u1VGif = pCtx->hwvirt.fGif;
2236 }
2237#endif
2238
2239 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
2240 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
2241
2242 hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
2243
2244 rc = hmR0SvmSetupVMRunHandler(pVCpu);
2245 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
2246
2247 /* Clear any unused and reserved bits. */
2248 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */
2249 | HM_CHANGED_GUEST_RSP
2250 | HM_CHANGED_GUEST_RFLAGS
2251 | HM_CHANGED_GUEST_SYSENTER_CS_MSR
2252 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
2253 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
2254 | HM_CHANGED_GUEST_LAZY_MSRS /* Unused. */
2255 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */
2256 | HM_CHANGED_SVM_RESERVED2
2257 | HM_CHANGED_SVM_RESERVED3
2258 | HM_CHANGED_SVM_RESERVED4);
2259
2260 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
2261 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
2262 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
2263 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
2264
2265#ifdef VBOX_STRICT
2266 hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmLoadGuestState", 0 /* fFlags */, 0 /* uVerbose */);
2267#endif
2268 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
2269 return rc;
2270}
2271
2272
2273#ifdef VBOX_WITH_NESTED_HWVIRT
2274/**
2275 * Caches the nested-guest VMCB fields before we modify them for execution using
2276 * hardware-assisted SVM.
2277 *
2278 * @returns true if the VMCB was previously already cached, false otherwise.
2279 * @param pCtx Pointer to the guest-CPU context.
2280 *
2281 * @sa HMSvmNstGstVmExitNotify.
2282 */
2283static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
2284{
2285 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2286 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2287 PCSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
2288 PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2289
2290 /*
2291 * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
2292 * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
2293 *
2294 * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
2295 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
2296 */
2297 bool const fWasCached = pCtx->hwvirt.svm.fHMCachedVmcb;
2298 if (!fWasCached)
2299 {
2300 pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
2301 pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
2302 pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
2303 pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
2304 pNstGstVmcbCache->u32InterceptXcpt = pVmcbNstGstCtrl->u32InterceptXcpt;
2305 pNstGstVmcbCache->u64InterceptCtrl = pVmcbNstGstCtrl->u64InterceptCtrl;
2306 pNstGstVmcbCache->u64CR0 = pVmcbNstGstState->u64CR0;
2307 pNstGstVmcbCache->u64CR3 = pVmcbNstGstState->u64CR3;
2308 pNstGstVmcbCache->u64CR4 = pVmcbNstGstState->u64CR4;
2309 pNstGstVmcbCache->u64EFER = pVmcbNstGstState->u64EFER;
2310 pNstGstVmcbCache->u64DBGCTL = pVmcbNstGstState->u64DBGCTL;
2311 pNstGstVmcbCache->u64IOPMPhysAddr = pVmcbNstGstCtrl->u64IOPMPhysAddr;
2312 pNstGstVmcbCache->u64MSRPMPhysAddr = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
2313 pNstGstVmcbCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
2314 pNstGstVmcbCache->u32VmcbCleanBits = pVmcbNstGstCtrl->u32VmcbCleanBits;
2315 pNstGstVmcbCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
2316 pNstGstVmcbCache->TLBCtrl = pVmcbNstGstCtrl->TLBCtrl;
2317 pNstGstVmcbCache->u1NestedPaging = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
2318 pNstGstVmcbCache->u1LbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
2319 pCtx->hwvirt.svm.fHMCachedVmcb = true;
2320 Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
2321 }
2322
2323 return fWasCached;
2324}
2325
2326
2327/**
2328 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
2329 *
2330 * @param pVCpu The cross context virtual CPU structure.
2331 * @param pCtx Pointer to the guest-CPU context.
2332 */
2333static void hmR0SvmVmRunSetupVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
2334{
2335 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2336 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2337
2338 /*
2339 * First cache the nested-guest VMCB fields we may potentially modify.
2340 */
2341 bool const fVmcbCached = hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
2342 if (!fVmcbCached)
2343 {
2344 /*
2345 * The IOPM of the nested-guest can be ignored because the guest always
2346 * intercepts all IO port accesses. Thus, we'll use the guest IOPM rather
2347 * than the nested-guest one and swap it back on the #VMEXIT.
2348 */
2349 pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
2350
2351 /*
2352 * Load the host-physical address into the MSRPM rather than the nested-guest
2353 * physical address (currently we trap all MSRs in the nested-guest).
2354 */
2355 pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
2356
2357 /*
2358 * Use the same nested-paging as the "outer" guest. We can't dynamically
2359 * switch off nested-paging suddenly while executing a VM (see assertion at the
2360 * end of Trap0eHandler in PGMAllBth.h).
2361 */
2362 pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
2363
2364 /* For now copy the LBR info. from outer guest VMCB. */
2365 /** @todo fix this later. */
2366 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
2367 pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
2368 pVmcbNstGst->guest.u64DBGCTL = pVmcb->guest.u64DBGCTL;
2369 }
2370 else
2371 {
2372 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
2373 Assert(pVmcbNstGstCtrl->u64MSRPMPhysAddr == g_HCPhysNstGstMsrBitmap);
2374 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
2375 }
2376}
2377
2378
2379/**
2380 * Loads the nested-guest state into the VMCB.
2381 *
2382 * @returns VBox status code.
2383 * @param pVCpu The cross context virtual CPU structure.
2384 * @param pCtx Pointer to the guest-CPU context.
2385 *
2386 * @remarks No-long-jump zone!!!
2387 */
2388static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCPUMCTX pCtx)
2389{
2390 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
2391
2392 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2393 Assert(pVmcbNstGst);
2394
2395 hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
2396
2397 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);
2398 AssertRCReturn(rc, rc);
2399
2400 /*
2401 * We need to load the entire state (including FS, GS etc.) as we could be continuing
2402 * to execute the nested-guest at any point (not just immediately after VMRUN) and thus
2403 * the VMCB can possibly be out-of-sync with the actual nested-guest state if it was
2404 * executed in IEM.
2405 */
2406 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
2407 hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
2408 hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
2409
2410 pVmcbNstGst->guest.u64RIP = pCtx->rip;
2411 pVmcbNstGst->guest.u64RSP = pCtx->rsp;
2412 pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
2413 pVmcbNstGst->guest.u64RAX = pCtx->rax;
2414
2415#ifdef VBOX_WITH_NESTED_HWVIRT
2416 Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable == 0); /* Nested VGIF not supported yet. */
2417#endif
2418
2419 hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcbNstGst, pCtx);
2420
2421 rc = hmR0SvmSetupVMRunHandler(pVCpu);
2422 AssertRCReturn(rc, rc);
2423
2424 /* Clear any unused and reserved bits. */
2425 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */
2426 | HM_CHANGED_GUEST_RSP
2427 | HM_CHANGED_GUEST_RFLAGS
2428 | HM_CHANGED_GUEST_SYSENTER_CS_MSR
2429 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
2430 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
2431 | HM_CHANGED_GUEST_LAZY_MSRS /* Unused. */
2432 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */
2433 | HM_CHANGED_SVM_RESERVED2
2434 | HM_CHANGED_SVM_RESERVED3
2435 | HM_CHANGED_SVM_RESERVED4);
2436
2437 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
2438 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
2439 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
2440 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
2441
2442#ifdef VBOX_STRICT
2443 hmR0SvmLogState(pVCpu, pVmcbNstGst, pCtx, "hmR0SvmLoadGuestStateNested", HMSVM_LOG_ALL, 0 /* uVerbose */);
2444#endif
2445 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
2446 return rc;
2447}
2448#endif
2449
2450
2451/**
2452 * Loads the state shared between the host and guest or nested-guest into the
2453 * VMCB.
2454 *
2455 * @param pVCpu The cross context virtual CPU structure.
2456 * @param pVmcb Pointer to the VM control block.
2457 * @param pCtx Pointer to the guest-CPU context.
2458 *
2459 * @remarks No-long-jump zone!!!
2460 */
2461static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
2462{
2463 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2464 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2465
2466 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
2467 {
2468 hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
2469 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
2470 }
2471
2472 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
2473 {
2474 /** @todo Figure out stepping with nested-guest. */
2475 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2476 hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
2477 else
2478 {
2479 pVmcb->guest.u64DR6 = pCtx->dr[6];
2480 pVmcb->guest.u64DR7 = pCtx->dr[7];
2481 Log4(("hmR0SvmLoadSharedState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
2482 }
2483
2484 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
2485 }
2486
2487 /* Unused on AMD-V. */
2488 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
2489
2490 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
2491 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
2492}
2493
2494
2495/**
2496 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
2497 * context.
2498 *
2499 * Currently there is no residual state left in the CPU that is not updated in the
2500 * VMCB.
2501 *
2502 * @returns VBox status code.
2503 * @param pVCpu The cross context virtual CPU structure.
2504 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2505 * out-of-sync. Make sure to update the required fields
2506 * before using them.
2507 * @param pVmcb Pointer to the VM control block.
2508 */
2509static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PCSVMVMCB pVmcb)
2510{
2511 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2512
2513 pMixedCtx->rip = pVmcb->guest.u64RIP;
2514 pMixedCtx->rsp = pVmcb->guest.u64RSP;
2515 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
2516 pMixedCtx->rax = pVmcb->guest.u64RAX;
2517
2518#ifdef VBOX_WITH_NESTED_HWVIRT
2519 /*
2520 * Guest Virtual GIF (Global Interrupt Flag).
2521 */
2522 if ( pVmcb->ctrl.IntCtrl.n.u1VGifEnable == 1
2523 && !CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
2524 {
2525 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
2526 pMixedCtx->hwvirt.fGif = pVmcb->ctrl.IntCtrl.n.u1VGif;
2527 }
2528#endif
2529
2530 /*
2531 * Guest interrupt shadow.
2532 */
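/* The VMCB only reports that an interrupt shadow (from STI/MOV SS) is in effect, not where it was
   established; record the current RIP so EM drops the inhibition as soon as RIP moves on. */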
2533 if (pVmcb->ctrl.IntShadow.n.u1IntShadow)
2534 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
2535 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2536 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2537
2538 /*
2539 * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
2540 */
2541 pMixedCtx->cr2 = pVmcb->guest.u64CR2;
2542
2543 /*
2544 * Guest MSRs.
2545 */
2546 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */
2547 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */
2548 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */
2549 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */
2550 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */
2551 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
2552 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
2553 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
2554
2555 /*
2556 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
2557 */
2558 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, CS, cs);
2559 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, SS, ss);
2560 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, DS, ds);
2561 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, ES, es);
2562 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, FS, fs);
2563 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, GS, gs);
2564
2565 /*
2566 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
2567 * register (yet).
2568 */
2569 /** @todo SELM might need to be fixed as it too should not care about the
2570 * granularity bit. See @bugref{6785}. */
2571 if ( !pMixedCtx->cs.Attr.n.u1Granularity
2572 && pMixedCtx->cs.Attr.n.u1Present
2573 && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
2574 {
2575 Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
2576 pMixedCtx->cs.Attr.n.u1Granularity = 1;
2577 }
2578
2579 HMSVM_ASSERT_SEG_GRANULARITY(cs);
2580 HMSVM_ASSERT_SEG_GRANULARITY(ss);
2581 HMSVM_ASSERT_SEG_GRANULARITY(ds);
2582 HMSVM_ASSERT_SEG_GRANULARITY(es);
2583 HMSVM_ASSERT_SEG_GRANULARITY(fs);
2584 HMSVM_ASSERT_SEG_GRANULARITY(gs);
2585
2586 /*
2587 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that,
2588 * so it's possible that when the CPL changes during guest execution the SS DPL
2589 * isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests.
2590 * See AMD spec. 15.5.1 "Basic operation".
2591 */
2592 Assert(!(pVmcb->guest.u8CPL & ~0x3));
2593 uint8_t const uCpl = pVmcb->guest.u8CPL;
2594 if (pMixedCtx->ss.Attr.n.u2Dpl != uCpl)
2595 {
2596 Log4(("hmR0SvmSaveGuestState: CPL differs. SS.DPL=%u, CPL=%u, overwriting SS.DPL!\n", pMixedCtx->ss.Attr.n.u2Dpl, uCpl));
2597 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
2598 }
2599
2600 /*
2601 * Guest TR.
2602 * Fixup TR attributes so it's compatible with Intel. Important when saved-states are used
2603 * between Intel and AMD. See @bugref{6208#c39}.
2604 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
2605 */
2606 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, TR, tr);
2607 if (pMixedCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2608 {
2609 if ( pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2610 || CPUMIsGuestInLongModeEx(pMixedCtx))
2611 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2612 else if (pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
2613 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2614 }
2615
2616 /*
2617 * Guest Descriptor-Table registers (GDTR, IDTR, LDTR).
2618 */
2619 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr);
2620 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
2621 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base;
2622
2623 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
2624 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base;
2625
2626 /*
2627 * Guest Debug registers.
2628 */
2629 if (!pVCpu->hm.s.fUsingHyperDR7)
2630 {
2631 pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
2632 pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
2633 }
2634 else
2635 {
2636 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
2637 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
2638 }
2639
2640 /*
2641 * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
2642 * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
2643 */
2644 if ( pVmcb->ctrl.NestedPaging.n.u1NestedPaging
2645 && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
2646 {
2647 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
2648 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
2649 }
2650
2651#ifdef VBOX_STRICT
2652 if (CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
2653 hmR0SvmLogState(pVCpu, pVmcb, pMixedCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
2654#endif
2655}
2656
2657
2658/**
2659 * Does the necessary state syncing before returning to ring-3 for any reason
2660 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2661 *
2662 * @param pVCpu The cross context virtual CPU structure.
2663 *
2664 * @remarks No-long-jmp zone!!!
2665 */
2666static void hmR0SvmLeave(PVMCPU pVCpu)
2667{
2668 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2669 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2670 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2671
2672 /*
2673 * !!! IMPORTANT !!!
2674 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2675 */
2676
2677 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2678 if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
2679 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); /** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */
2680
2681 /*
2682 * Restore host debug registers if necessary and resync on next R0 reentry.
2683 */
2684#ifdef VBOX_STRICT
2685 if (CPUMIsHyperDebugStateActive(pVCpu))
2686 {
2687 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; /** @todo nested-guest. */
2688 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2689 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2690 }
2691#endif
2692 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
2693 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);/** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */
2694
2695 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2696 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2697
2698 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
2699 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
2700 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
2701 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
2702 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2703
2704 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
2705}
2706
2707
2708/**
2709 * Leaves the AMD-V session.
2710 *
2711 * @returns VBox status code.
2712 * @param pVCpu The cross context virtual CPU structure.
2713 */
2714static int hmR0SvmLeaveSession(PVMCPU pVCpu)
2715{
2716 HM_DISABLE_PREEMPT();
2717 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2718 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2719
2720 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
2721 and done this from the SVMR0ThreadCtxCallback(). */
2722 if (!pVCpu->hm.s.fLeaveDone)
2723 {
2724 hmR0SvmLeave(pVCpu);
2725 pVCpu->hm.s.fLeaveDone = true;
2726 }
2727
2728 /*
2729 * !!! IMPORTANT !!!
2730 * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
2731 */
2732
2733 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
2734 /* Deregister hook now that we've left HM context before re-enabling preemption. */
2735 VMMR0ThreadCtxHookDisable(pVCpu);
2736
2737 /* Leave HM context. This takes care of local init (term). */
2738 int rc = HMR0LeaveCpu(pVCpu);
2739
2740 HM_RESTORE_PREEMPT();
2741 return rc;
2742}
2743
2744
2745/**
2746 * Does the necessary state syncing before doing a longjmp to ring-3.
2747 *
2748 * @returns VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure.
2750 *
2751 * @remarks No-long-jmp zone!!!
2752 */
2753static int hmR0SvmLongJmpToRing3(PVMCPU pVCpu)
2754{
2755 return hmR0SvmLeaveSession(pVCpu);
2756}
2757
2758
2759/**
2760 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
2761 * any remaining host state) before we longjump to ring-3 and possibly get
2762 * preempted.
2763 *
2764 * @param pVCpu The cross context virtual CPU structure.
2765 * @param enmOperation The operation causing the ring-3 longjump.
2766 * @param pvUser The user argument (pointer to the possibly
2767 * out-of-date guest-CPU context).
2768 */
2769static DECLCALLBACK(int) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
2770{
2771 RT_NOREF_PV(pvUser);
2772
2773 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
2774 {
2775 /*
2776 * !!! IMPORTANT !!!
2777 * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
2778 * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
2779 */
2780 VMMRZCallRing3RemoveNotification(pVCpu);
2781 VMMRZCallRing3Disable(pVCpu);
2782 HM_DISABLE_PREEMPT();
2783
2784 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2785 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
2786
2787 /* Restore host debug registers if necessary and resync on next R0 reentry. */
2788 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2789
2790 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
2791 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
2792 VMMR0ThreadCtxHookDisable(pVCpu);
2793
2794 /* Leave HM context. This takes care of local init (term). */
2795 HMR0LeaveCpu(pVCpu);
2796
2797 HM_RESTORE_PREEMPT();
2798 return VINF_SUCCESS;
2799 }
2800
2801 Assert(pVCpu);
2802 Assert(pvUser);
2803 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2804 HMSVM_ASSERT_PREEMPT_SAFE();
2805
2806 VMMRZCallRing3Disable(pVCpu);
2807 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2808
2809 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
2810 int rc = hmR0SvmLongJmpToRing3(pVCpu);
2811 AssertRCReturn(rc, rc);
2812
2813 VMMRZCallRing3Enable(pVCpu);
2814 return VINF_SUCCESS;
2815}
2816
2817
2818/**
2819 * Take necessary actions before going back to ring-3.
2820 *
2821 * An action requires us to go back to ring-3. This function does the necessary
2822 * steps before we can safely return to ring-3. This is not the same as longjmps
2823 * to ring-3, this is voluntary.
2824 *
2825 * @returns VBox status code.
2826 * @param pVM The cross context VM structure.
2827 * @param pVCpu The cross context virtual CPU structure.
2828 * @param pCtx Pointer to the guest-CPU context.
2829 * @param rcExit The reason for exiting to ring-3. Can be
2830 * VINF_VMM_UNKNOWN_RING3_CALL.
2831 */
2832static int hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
2833{
2834 Assert(pVM);
2835 Assert(pVCpu);
2836 Assert(pCtx);
2837 HMSVM_ASSERT_PREEMPT_SAFE();
2838
2839 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
2840 VMMRZCallRing3Disable(pVCpu);
2841 Log4(("hmR0SvmExitToRing3: VCPU[%u]: rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", pVCpu->idCpu, rcExit,
2842 pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));
2843
2844 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
2845 if (pVCpu->hm.s.Event.fPending)
2846 {
2847 hmR0SvmPendingEventToTrpmTrap(pVCpu);
2848 Assert(!pVCpu->hm.s.Event.fPending);
2849 }
2850
2851 /* Sync. the necessary state for going back to ring-3. */
2852 hmR0SvmLeaveSession(pVCpu);
2853 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2854
2855 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
2856 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
2857 | CPUM_CHANGED_LDTR
2858 | CPUM_CHANGED_GDTR
2859 | CPUM_CHANGED_IDTR
2860 | CPUM_CHANGED_TR
2861 | CPUM_CHANGED_HIDDEN_SEL_REGS);
2862 if ( pVM->hm.s.fNestedPaging
2863 && CPUMIsGuestPagingEnabledEx(pCtx))
2864 {
2865 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2866 }
2867
2868 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
2869 if (rcExit != VINF_EM_RAW_INTERRUPT)
2870 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
2871
2872 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
2873
2874 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
2875 VMMRZCallRing3RemoveNotification(pVCpu);
2876 VMMRZCallRing3Enable(pVCpu);
2877
2878 /*
2879 * If we're emulating an instruction, we shouldn't have any TRPM traps pending
2880 * and if we're injecting an event we should have a TRPM trap pending.
2881 */
2882 AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
2883 pVCpu->hm.s.u32HMError = rcExit,
2884 VERR_SVM_IPE_5);
2885 AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
2886 pVCpu->hm.s.u32HMError = rcExit,
2887 VERR_SVM_IPE_4);
2888
2889 return rcExit;
2890}
2891
2892
2893#ifdef VBOX_WITH_NESTED_HWVIRT
2894/**
2895 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
2896 * intercepts for the nested-guest.
2897 *
2898 * @param pVM The cross context VM structure.
2899 * @param pVCpu The cross context virtual CPU structure.
2900 * @param pCtx Pointer to the nested guest-CPU context.
2901 * @param pVmcbNstGst Pointer to the nested-guest VM control block.
2902 *
2903 * @remarks No-long-jump zone!!!
2904 */
2905static void hmR0SvmUpdateTscOffsettingNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
2906{
2907 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2908
2909 bool fParavirtTsc;
2910 uint64_t uTscOffset;
2911 bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
2912
2913 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2914 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2915 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
2916
2917 /*
2918 * Only avoid intercepting if we determined the host TSC (++) is stable enough
2919 * to not intercept -and- the nested-hypervisor itself does not want to intercept it.
2920 */
2921 if ( fCanUseRealTsc
2922 && !(pVmcbNstGstCache->u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
2923 {
2924 pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
2925 pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
2926 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2927 }
2928 else
2929 {
2930 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
2931 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
2932 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2933 }
2934
2935 /* Apply the nested-guest VMCB's TSC offset over the guest one. */
2936 uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
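/* The nested-guest thus essentially sees host TSC + guest offset + nested-guest offset, i.e. the
   value written below is the sum of the TM-computed guest offset and the offset the
   nested-hypervisor programmed in its VMCB. */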
2937
2938 /* Update the nested-guest VMCB with the combined TSC offset (of guest and nested-guest). */
2939 pVmcbNstGstCtrl->u64TSCOffset = uTscOffset;
2940
2941 /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. */
2942 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2943
2944 if (fParavirtTsc)
2945 {
2946 /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
2947 information before every VM-entry, hence disable it for performance sake. */
2948 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
2949 }
2950}
2951#endif
2952
2953
2954/**
2955 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
2956 * intercepts.
2957 *
2958 * @param pVM The cross context VM structure.
2959 * @param pVCpu The cross context virtual CPU structure.
2960 * @param pVmcb Pointer to the VM control block.
2961 *
2962 * @remarks No-long-jump zone!!!
2963 */
2964static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PSVMVMCB pVmcb)
2965{
2966 bool fParavirtTsc;
2967 bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
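/* When RDTSC/RDTSCP are left unintercepted, the CPU returns the host TSC plus u64TSCOffset to the
   guest, so with a stable host TSC the guest can read its virtualized TSC without a #VMEXIT. */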
2968 if (fCanUseRealTsc)
2969 {
2970 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
2971 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
2972 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2973 }
2974 else
2975 {
2976 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
2977 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
2978 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2979 }
2980 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2981
2982 /** @todo later optimize this to be done elsewhere and not before every
2983 * VM-entry. */
2984 if (fParavirtTsc)
2985 {
2986 /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
2987 information before every VM-entry, hence disable it for performance sake. */
2988#if 0
2989 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
2990 AssertRC(rc);
2991#endif
2992 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
2993 }
2994}
2995
2996
2997/**
2998 * Sets an event as a pending event to be injected into the guest.
2999 *
3000 * @param pVCpu The cross context virtual CPU structure.
3001 * @param pEvent Pointer to the SVM event.
3002 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3003 * page-fault.
3004 *
3005 * @remarks Statistics counter assumes this is a guest event being reflected to
3006 * the guest i.e. 'StatInjectPendingReflect' is incremented always.
3007 */
3008DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
3009{
3010 Assert(!pVCpu->hm.s.Event.fPending);
3011 Assert(pEvent->n.u1Valid);
3012
3013 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
3014 pVCpu->hm.s.Event.fPending = true;
3015 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
3016
3017 Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
3018 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3019}
3020
3021
3022/**
3023 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3024 *
3025 * @param pVCpu The cross context virtual CPU structure.
3026 */
3027DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu)
3028{
3029 SVMEVENT Event;
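/* SVMEVENT follows the EVENTINJ layout: vector in bits 7:0, type in bits 10:8, the error-code-valid
   bit in bit 11, the valid bit in bit 31 and the error code in bits 63:32, see AMD spec. 15.20
   "Event Injection". */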
3030 Event.u = 0;
3031 Event.n.u1Valid = 1;
3032 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3033 Event.n.u8Vector = X86_XCPT_UD;
3034 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3035}
3036
3037
3038/**
3039 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3040 *
3041 * @param pVCpu The cross context virtual CPU structure.
3042 */
3043DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPU pVCpu)
3044{
3045 SVMEVENT Event;
3046 Event.u = 0;
3047 Event.n.u1Valid = 1;
3048 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3049 Event.n.u8Vector = X86_XCPT_DB;
3050 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3051}
3052
3053
3054/**
3055 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
3056 *
3057 * @param pVCpu The cross context virtual CPU structure.
3058 * @param pCtx Pointer to the guest-CPU context.
3059 * @param u32ErrCode The error-code for the page-fault.
3060 * @param uFaultAddress The page fault address (CR2).
3061 *
3062 * @remarks This updates the guest CR2 with @a uFaultAddress!
3063 */
3064DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3065{
3066 SVMEVENT Event;
3067 Event.u = 0;
3068 Event.n.u1Valid = 1;
3069 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3070 Event.n.u8Vector = X86_XCPT_PF;
3071 Event.n.u1ErrorCodeValid = 1;
3072 Event.n.u32ErrorCode = u32ErrCode;
3073
3074 /* Update CR2 of the guest. */
3075 if (pCtx->cr2 != uFaultAddress)
3076 {
3077 pCtx->cr2 = uFaultAddress;
3078 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
3079 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2);
3080 }
3081
3082 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3083}
3084
3085
3086/**
3087 * Sets a device-not-available (\#NM) exception as pending-for-injection into
3088 * the VM.
3089 *
3090 * @param pVCpu The cross context virtual CPU structure.
3091 */
3092DECLINLINE(void) hmR0SvmSetPendingXcptNM(PVMCPU pVCpu)
3093{
3094 SVMEVENT Event;
3095 Event.u = 0;
3096 Event.n.u1Valid = 1;
3097 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3098 Event.n.u8Vector = X86_XCPT_NM;
3099 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3100}
3101
3102
3103/**
3104 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
3105 *
3106 * @param pVCpu The cross context virtual CPU structure.
3107 */
3108DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPU pVCpu)
3109{
3110 SVMEVENT Event;
3111 Event.u = 0;
3112 Event.n.u1Valid = 1;
3113 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3114 Event.n.u8Vector = X86_XCPT_MF;
3115 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3116}
3117
3118
3119/**
3120 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
3121 *
3122 * @param pVCpu The cross context virtual CPU structure.
3123 */
3124DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
3125{
3126 SVMEVENT Event;
3127 Event.u = 0;
3128 Event.n.u1Valid = 1;
3129 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3130 Event.n.u8Vector = X86_XCPT_DF;
3131 Event.n.u1ErrorCodeValid = 1;
3132 Event.n.u32ErrorCode = 0;
3133 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3134}
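
/*
 * Illustrative sketch (disabled, not part of the original code): a pending-exception setter
 * for #GP following the same pattern as the helpers above. The helper name
 * hmR0SvmSetPendingXcptGP is hypothetical; like #DF and #PF above, #GP pushes an error code,
 * so u1ErrorCodeValid is set.
 */
#if 0
DECLINLINE(void) hmR0SvmSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
{
    SVMEVENT Event;
    Event.u                  = 0;
    Event.n.u1Valid          = 1;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector         = X86_XCPT_GP;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = u32ErrCode;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
}
#endif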
3135
3136
3137/**
3138 * Injects an event into the guest upon VMRUN by updating the relevant field
3139 * in the VMCB.
3140 *
3141 * @param pVCpu The cross context virtual CPU structure.
3142 * @param pVmcb Pointer to the guest VM control block.
3143 * @param pCtx Pointer to the guest-CPU context.
3144 * @param pEvent Pointer to the event.
3145 *
3146 * @remarks No-long-jump zone!!!
3147 * @remarks Requires CR0!
3148 */
3149DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
3150{
3151 NOREF(pVCpu); NOREF(pCtx);
3152
3153 Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
3154 pVmcb->ctrl.EventInject.u = pEvent->u;
3155 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
3156
3157 Log4(("hmR0SvmInjectEventVmcb: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
3158 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3159}
3160
3161
3162
3163/**
3164 * Converts any TRPM trap into a pending HM event. This is typically used when
3165 * entering from ring-3 (not longjmp returns).
3166 *
3167 * @param pVCpu The cross context virtual CPU structure.
3168 */
3169static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
3170{
3171 Assert(TRPMHasTrap(pVCpu));
3172 Assert(!pVCpu->hm.s.Event.fPending);
3173
3174 uint8_t uVector;
3175 TRPMEVENT enmTrpmEvent;
3176 RTGCUINT uErrCode;
3177 RTGCUINTPTR GCPtrFaultAddress;
3178 uint8_t cbInstr;
3179
3180 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
3181 AssertRC(rc);
3182
3183 SVMEVENT Event;
3184 Event.u = 0;
3185 Event.n.u1Valid = 1;
3186 Event.n.u8Vector = uVector;
3187
3188 /* Refer to AMD spec. 15.20 "Event Injection" for the format. */
3189 if (enmTrpmEvent == TRPM_TRAP)
3190 {
3191 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3192 switch (uVector)
3193 {
3194 case X86_XCPT_NMI:
3195 {
3196 Event.n.u3Type = SVM_EVENT_NMI;
3197 break;
3198 }
3199
3200 case X86_XCPT_PF:
3201 case X86_XCPT_DF:
3202 case X86_XCPT_TS:
3203 case X86_XCPT_NP:
3204 case X86_XCPT_SS:
3205 case X86_XCPT_GP:
3206 case X86_XCPT_AC:
3207 {
3208 Event.n.u1ErrorCodeValid = 1;
3209 Event.n.u32ErrorCode = uErrCode;
3210 break;
3211 }
3212 }
3213 }
3214 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
3215 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3216 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
3217 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
3218 else
3219 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
3220
3221 rc = TRPMResetTrap(pVCpu);
3222 AssertRC(rc);
3223
3224 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
3225 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
3226
3227 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
3228}
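
/*
 * For reference: the event "Type" field used above follows the AMD spec. 15.20 "Event
 * Injection" encoding (0 = external/virtual interrupt, 2 = NMI, 3 = exception, 4 = software
 * interrupt); the SVM_EVENT_* constants are expected to mirror these hardware values.
 */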
3229
3230
3231/**
3232 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
3233 * AMD-V to execute any instruction.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure.
3236 */
3237static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
3238{
3239 Assert(pVCpu->hm.s.Event.fPending);
3240 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
3241
3242 SVMEVENT Event;
3243 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3244
3245 uint8_t uVector = Event.n.u8Vector;
3246 uint8_t uVectorType = Event.n.u3Type;
3247 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event);
3248
3249 Log4(("HM event->TRPM: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
3250
3251 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
3252 AssertRC(rc);
3253
3254 if (Event.n.u1ErrorCodeValid)
3255 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
3256
3257 if ( uVectorType == SVM_EVENT_EXCEPTION
3258 && uVector == X86_XCPT_PF)
3259 {
3260 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
3261 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
3262 }
3263 else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
3264 {
3265 AssertMsg( uVectorType == SVM_EVENT_SOFTWARE_INT
3266 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
3267 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
3268 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
3269 }
3270 pVCpu->hm.s.Event.fPending = false;
3271}
3272
3273
3274/**
3275 * Checks if the guest (or nested-guest) has an interrupt shadow active right
3276 * now.
3277 *
3278 * @returns @c true if the interrupt shadow is active, @c false otherwise.
3279 * @param pVCpu The cross context virtual CPU structure.
3280 * @param pCtx Pointer to the guest-CPU context.
3281 *
3282 * @remarks No-long-jump zone!!!
3283 * @remarks Has side effects on the VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
3284 */
3285DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCPUMCTX pCtx)
3286{
3287 /*
3288 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
3289 * inhibit interrupts or clear any existing interrupt-inhibition.
3290 */
3291 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3292 {
3293 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
3294 {
3295 /*
3296 * We can clear the inhibit force flag since, even if we go back to the recompiler without executing guest code in
3297 * AMD-V, the condition for clearing the flag has been met and thus the cleared state is correct.
3298 */
3299 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3300 return false;
3301 }
3302 return true;
3303 }
3304 return false;
3305}
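
/*
 * The check above works by comparing the current RIP against the RIP recorded by EM when the
 * inhibiting instruction (e.g. STI or MOV SS) was executed, retrieved here via
 * EMGetInhibitInterruptsPC(); once RIP has moved past it, the interrupt shadow has expired.
 */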
3306
3307
3308/**
3309 * Sets the virtual interrupt intercept control in the VMCB.
3310 *
3311 * @param pVmcb Pointer to the VM control block.
3312 */
3313DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
3314{
3315 /*
3316 * When AVIC isn't supported, indicate that a virtual interrupt is pending and to
3317 * cause a #VMEXIT when the guest is ready to accept interrupts. At #VMEXIT, we
3318 * then get the interrupt from the APIC (updating ISR at the right time) and
3319 * inject the interrupt.
3320 *
3321 * When AVIC is supported, we could make use of asynchronous delivery without
3322 * a #VMEXIT and we would be passing the AVIC page to SVM.
3323 */
3324 if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
3325 {
3326 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 0);
3327 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
3328 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
3329 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
3330 Log4(("Set VINTR intercept\n"));
3331 }
3332}
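
/*
 * In other words, V_IRQ is used here purely as a doorbell: no vector is programmed into the
 * VMCB by this function. We only want the VINTR #VMEXIT that fires once the guest becomes
 * able to take interrupts; the real vector is then fetched from the PIC/APIC and injected on
 * a subsequent VMRUN.
 */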
3333
3334
3335/**
3336 * Clears the virtual interrupt intercept control in the VMCB, as
3337 * we have figured the guest is unable to process any interrupts
3338 * at this point in time.
3339 *
3340 * @param pVmcb Pointer to the VM control block.
3341 */
3342DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
3343{
3344 if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
3345 {
3346 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 1);
3347 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
3348 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
3349 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
3350 Log4(("Cleared VINTR intercept\n"));
3351 }
3352}
3353
3354
3355/**
3356 * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
3357 * \#VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
3358 * virtual NMIs.
3359 *
3360 * @param pVmcb Pointer to the VM control block.
3361 */
3362DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
3363{
3364 if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
3365 {
3366 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
3367 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
3368
3369 Log4(("Setting IRET intercept\n"));
3370 }
3371}
3372
3373
3374/**
3375 * Clears the IRET intercept control in the VMCB.
3376 *
3377 * @param pVmcb Pointer to the VM control block.
3378 */
3379DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
3380{
3381 if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
3382 {
3383 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
3384 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
3385
3386 Log4(("Clearing IRET intercept\n"));
3387 }
3388}
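
/*
 * Together with hmR0SvmSetIretIntercept(), this implements virtual NMI blocking: after an
 * NMI has been injected the IRET intercept remains set, and the IRET #VMEXIT handler
 * elsewhere in this file is expected to clear VMCPU_FF_BLOCK_NMIS and clear the intercept so
 * that further NMIs can be delivered.
 */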
3389
3390#ifdef VBOX_WITH_NESTED_HWVIRT
3391
3392
3393/**
3394 * Evaluates the event to be delivered to the nested-guest and sets it as the
3395 * pending event.
3396 *
3397 * @returns VBox strict status code.
3398 * @param pVCpu The cross context virtual CPU structure.
3399 * @param pCtx Pointer to the guest-CPU context.
3400 */
3401static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu, PCPUMCTX pCtx)
3402{
3403 Log4Func(("\n"));
3404
3405 Assert(!pVCpu->hm.s.Event.fPending);
3406
3407 bool const fGif = pCtx->hwvirt.fGif;
3408 if (fGif)
3409 {
3410 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3411
3412 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
3413
3414 /*
3415 * Check if the nested-guest can receive NMIs.
3416 * NMIs are higher priority than regular interrupts.
3417 */
3418 /** @todo SMI. SMIs take priority over NMIs. */
3419 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
3420 {
3421 bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
3422 if (fBlockNmi)
3423 hmR0SvmSetIretIntercept(pVmcbNstGst);
3424 else if (fIntShadow)
3425 {
3426 /** @todo Figure out how we shall manage the virt. intercept if the
3427 * nested-guest already has one set and/or whether we really need it. */
3428 //hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
3429 }
3430 else
3431 {
3432 Log4(("Pending NMI\n"));
3433
3434 SVMEVENT Event;
3435 Event.u = 0;
3436 Event.n.u1Valid = 1;
3437 Event.n.u8Vector = X86_XCPT_NMI;
3438 Event.n.u3Type = SVM_EVENT_NMI;
3439
3440 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3441 hmR0SvmSetIretIntercept(pVmcbNstGst);
3442 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3443 return VINF_SUCCESS;
3444 }
3445 }
3446
3447 /*
3448 * Check if the nested-guest can receive external interrupts (generated by
3449 * the guest's PIC/APIC).
3450 *
3451 * External interrupts, NMI, SMI etc. from the physical CPU are -always- intercepted
3452 * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
3453 *
3454 * External interrupts that are generated for the outer guest may be intercepted
3455 * depending on how the nested-guest VMCB was programmed by guest software.
3456 *
3457 * Physical interrupts always take priority over virtual interrupts,
3458 * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
3459 */
3460 if (!fIntShadow)
3461 {
3462 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3463 && !pVCpu->hm.s.fSingleInstruction
3464 && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
3465 {
3466 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
3467 {
3468 Log4(("Intercepting external interrupt -> #VMEXIT\n"));
3469 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
3470 }
3471
3472 uint8_t u8Interrupt;
3473 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3474 if (RT_SUCCESS(rc))
3475 {
3476 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
3477
3478 SVMEVENT Event;
3479 Event.u = 0;
3480 Event.n.u1Valid = 1;
3481 Event.n.u8Vector = u8Interrupt;
3482 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3483
3484 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3485 }
3486 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3487 {
3488 /*
3489 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3490 * updated eventually when the TPR is written by the guest.
3491 */
3492 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3493 }
3494 else
3495 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3496 }
3497
3498 /*
3499 * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
3500 * interrupt injection. The virtual interrupt injection itself, if any, will be done
3501 * by the physical CPU.
3502 */
3503 /** @todo later explore this for performance reasons. Right now the hardware
3504 * takes care of virtual interrupt injection for nested-guest. */
3505#if 0
3506 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
3507 && (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
3508 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
3509 {
3510 Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
3511 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
3512 }
3513#endif
3514 }
3515 }
3516
3517 return VINF_SUCCESS;
3518}
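
/*
 * Summary of the above: NMIs are considered before external interrupts. For external
 * interrupts, if the nested-guest intercepts INTR, a #VMEXIT is synthesized via
 * IEMExecSvmVmexit() instead of injecting anything; otherwise the vector is fetched from the
 * PIC/APIC and queued as a pending event for injection into the nested-guest.
 */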
3519#endif
3520
3521
3522/**
3523 * Evaluates the event to be delivered to the guest and sets it as the pending
3524 * event.
3525 *
3526 * @param pVCpu The cross context virtual CPU structure.
3527 * @param pCtx Pointer to the guest-CPU context.
3528 *
3529 * @remarks Don't use this function when we are actively executing a
3530 * nested-guest, use hmR0SvmEvaluatePendingEventNested instead.
3531 */
3532static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
3533{
3534 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
3535 Assert(!pVCpu->hm.s.Event.fPending);
3536
3537#ifdef VBOX_WITH_NESTED_HWVIRT
3538 bool const fGif = pCtx->hwvirt.fGif;
3539#else
3540 bool const fGif = true;
3541#endif
3542 Log4Func(("fGif=%RTbool\n", fGif));
3543
3544 /*
3545 * If the global interrupt flag (GIF) isn't set, even NMIs and other events are blocked.
3546 * See AMD spec. Table 15-10. "Effect of the GIF on Interrupt Handling".
3547 */
3548 if (fGif)
3549 {
3550 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
3551 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
3552 bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
3553 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
3554
3555 Log4Func(("fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fBlockInt, fIntShadow,
3556 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
3557
3558 /** @todo SMI. SMIs take priority over NMIs. */
3559 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
3560 {
3561 if (fBlockNmi)
3562 hmR0SvmSetIretIntercept(pVmcb);
3563 else if (fIntShadow)
3564 hmR0SvmSetVirtIntrIntercept(pVmcb);
3565 else
3566 {
3567 Log4(("Pending NMI\n"));
3568
3569 SVMEVENT Event;
3570 Event.u = 0;
3571 Event.n.u1Valid = 1;
3572 Event.n.u8Vector = X86_XCPT_NMI;
3573 Event.n.u3Type = SVM_EVENT_NMI;
3574
3575 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3576 hmR0SvmSetIretIntercept(pVmcb);
3577 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3578 return;
3579 }
3580 }
3581 else if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3582 && !pVCpu->hm.s.fSingleInstruction)
3583 {
3584 /*
3585 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
3586 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
3587 */
3588 if ( !fBlockInt
3589 && !fIntShadow)
3590 {
3591 uint8_t u8Interrupt;
3592 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3593 if (RT_SUCCESS(rc))
3594 {
3595 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
3596
3597 SVMEVENT Event;
3598 Event.u = 0;
3599 Event.n.u1Valid = 1;
3600 Event.n.u8Vector = u8Interrupt;
3601 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3602
3603 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3604 }
3605 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3606 {
3607 /*
3608 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3609 * updated eventually when the TPR is written by the guest.
3610 */
3611 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3612 }
3613 else
3614 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3615 }
3616 else
3617 hmR0SvmSetVirtIntrIntercept(pVmcb);
3618 }
3619 }
3620}
3621
3622
3623/**
3624 * Injects any pending events into the guest or nested-guest.
3625 *
3626 * @param pVCpu The cross context virtual CPU structure.
3627 * @param pCtx Pointer to the guest-CPU context.
3628 * @param pVmcb Pointer to the VM control block.
3629 */
3630static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
3631{
3632 Assert(!TRPMHasTrap(pVCpu));
3633 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3634
3635 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
3636#ifdef VBOX_STRICT
3637 bool const fGif = pCtx->hwvirt.fGif;
3638 bool fAllowInt = fGif;
3639 if (fGif)
3640 {
3641 /*
3642 * For nested-guests we have no way to determine if we're injecting a physical or virtual
3643 * interrupt at this point. Hence the partial verification below.
3644 */
3645 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3646 fAllowInt = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx) || CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx);
3647 else
3648 fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
3649 }
3650#endif
3651
3652 if (pVCpu->hm.s.Event.fPending)
3653 {
3654 SVMEVENT Event;
3655 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3656 Assert(Event.n.u1Valid);
3657
3658 /*
3659 * Validate event injection pre-conditions.
3660 */
3661 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3662 {
3663 Assert(fAllowInt);
3664 Assert(!fIntShadow);
3665 }
3666 else if (Event.n.u3Type == SVM_EVENT_NMI)
3667 {
3668 Assert(fGif);
3669 Assert(!fIntShadow);
3670 }
3671
3672 /*
3673 * Inject it (update VMCB for injection by the hardware).
3674 */
3675 Log4(("Injecting pending HM event\n"));
3676 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
3677 pVCpu->hm.s.Event.fPending = false;
3678
3679 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3680 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
3681 else
3682 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
3683 }
3684 else
3685 Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
3686
3687 /*
3688 * Update the guest interrupt shadow in the guest or nested-guest VMCB.
3689 *
3690 * For nested-guests: We need to update it too for the scenario where IEM executes
3691 * the nested-guest but execution later continues here with an interrupt shadow active.
3692 */
3693 pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
3694}
3695
3696
3697/**
3698 * Reports world-switch error and dumps some useful debug info.
3699 *
3700 * @param pVM The cross context VM structure.
3701 * @param pVCpu The cross context virtual CPU structure.
3702 * @param rcVMRun The return code from VMRUN (or
3703 * VERR_SVM_INVALID_GUEST_STATE for invalid
3704 * guest-state).
3705 * @param pCtx Pointer to the guest-CPU context.
3706 */
3707static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
3708{
3709 NOREF(pCtx);
3710 HMSVM_ASSERT_PREEMPT_SAFE();
3711 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
3712 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
3713
3714 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
3715 {
3716 hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
3717#ifdef VBOX_STRICT
3718 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
3719 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
3720 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
3721 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
3722 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
3723 Log4(("ctrl.u32InterceptXcpt %#x\n", pVmcb->ctrl.u32InterceptXcpt));
3724 Log4(("ctrl.u64InterceptCtrl %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
3725 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
3726 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
3727 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
3728
3729 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
3730 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
3731 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
3732
3733 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
3734 Log4(("ctrl.IntCtrl.u1VIrqPending %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
3735 Log4(("ctrl.IntCtrl.u1VGif %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGif));
3736 Log4(("ctrl.IntCtrl.u6Reserved0 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved0));
3737 Log4(("ctrl.IntCtrl.u4VIntrPrio %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
3738 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
3739 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
3740 Log4(("ctrl.IntCtrl.u1VIntrMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
3741 Log4(("ctrl.IntCtrl.u1VGifEnable %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
3742 Log4(("ctrl.IntCtrl.u5Reserved1 %#x\n", pVmcb->ctrl.IntCtrl.n.u5Reserved1));
3743 Log4(("ctrl.IntCtrl.u8VIntrVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
3744 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
3745
3746 Log4(("ctrl.IntShadow.u1IntShadow %#x\n", pVmcb->ctrl.IntShadow.n.u1IntShadow));
3747 Log4(("ctrl.IntShadow.u1GuestIntMask %#x\n", pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
3748 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
3749 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
3750 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
3751 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
3752 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
3753 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
3754 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
3755 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
3756 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3757 Log4(("ctrl.NestedPaging.u1NestedPaging %#x\n", pVmcb->ctrl.NestedPaging.n.u1NestedPaging));
3758 Log4(("ctrl.NestedPaging.u1Sev %#x\n", pVmcb->ctrl.NestedPaging.n.u1Sev));
3759 Log4(("ctrl.NestedPaging.u1SevEs %#x\n", pVmcb->ctrl.NestedPaging.n.u1SevEs));
3760 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
3761 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
3762 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
3763 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
3764 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
3765 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
3766
3767 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
3768
3769 Log4(("ctrl.LbrVirt.u1LbrVirt %#x\n", pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
3770 Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload %#x\n", pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));
3771
3772 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
3773 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
3774 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
3775 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
3776 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
3777 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
3778 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
3779 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
3780 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
3781 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
3782 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
3783 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
3784 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
3785 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
3786 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
3787 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
3788 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
3789 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
3790 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
3791 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
3792
3793 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
3794 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
3795
3796 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
3797 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
3798 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
3799 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
3800
3801 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
3802 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
3803
3804 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
3805 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
3806 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
3807 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
3808
3809 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
3810 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
3811 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
3812 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
3813 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
3814 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
3815 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
3816
3817 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
3818 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
3819 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
3820 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
3821
3822 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
3823 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
3824 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
3825
3826 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
3827 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
3828 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
3829 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
3830 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
3831 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
3832 Log4(("guest.u64GPAT %#RX64\n", pVmcb->guest.u64GPAT));
3833 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
3834 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
3835 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
3836 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
3837 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
3838#endif /* VBOX_STRICT */
3839 }
3840 else
3841 Log4(("hmR0SvmReportWorldSwitchError: rcVMRun=%d\n", rcVMRun));
3842
3843 NOREF(pVmcb);
3844}
3845
3846
3847/**
3848 * Check per-VM and per-VCPU force flag actions that require us to go back to
3849 * ring-3 for one reason or another.
3850 *
3851 * @returns VBox status code (information status code included).
3852 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3853 * ring-3.
3854 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3855 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3856 * interrupts)
3857 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3858 * all EMTs to be in ring-3.
3859 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3860 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3861 * to the EM loop.
3862 *
3863 * @param pVM The cross context VM structure.
3864 * @param pVCpu The cross context virtual CPU structure.
3865 * @param pCtx Pointer to the guest-CPU context.
3866 */
3867static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3868{
3869 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3870
3871 /* On AMD-V we don't need to update CR3, PAE PDPES lazily. See hmR0SvmSaveGuestState(). */
3872 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3873 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
3874
3875 /* Update pending interrupts into the APIC's IRR. */
3876 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3877 APICUpdatePendingInterrupts(pVCpu);
3878
3879 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
3880 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3881 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
3882 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3883 {
3884 /* Pending PGM CR3 sync. */
3885 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3886 {
3887 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3888 if (rc != VINF_SUCCESS)
3889 {
3890 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
3891 return rc;
3892 }
3893 }
3894
3895 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3896 /* -XXX- what was that about single stepping? */
3897 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
3898 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3899 {
3900 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3901 int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
3902 Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3903 return rc;
3904 }
3905
3906 /* Pending VM request packets, such as hardware interrupts. */
3907 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
3908 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
3909 {
3910 Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
3911 return VINF_EM_PENDING_REQUEST;
3912 }
3913
3914 /* Pending PGM pool flushes. */
3915 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3916 {
3917 Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
3918 return VINF_PGM_POOL_FLUSH_PENDING;
3919 }
3920
3921 /* Pending DMA requests. */
3922 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
3923 {
3924 Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
3925 return VINF_EM_RAW_TO_R3;
3926 }
3927 }
3928
3929 return VINF_SUCCESS;
3930}
3931
3932
3933#ifdef VBOX_WITH_NESTED_HWVIRT
3934/**
3935 * Does the preparations before executing nested-guest code in AMD-V.
3936 *
3937 * @returns VBox status code (informational status codes included).
3938 * @retval VINF_SUCCESS if we can proceed with running the guest.
3939 * @retval VINF_* scheduling changes, we have to go back to ring-3.
3940 *
3941 * @param pVM The cross context VM structure.
3942 * @param pVCpu The cross context virtual CPU structure.
3943 * @param pCtx Pointer to the guest-CPU context.
3944 * @param pSvmTransient Pointer to the SVM transient structure.
3945 *
3946 * @remarks The same caveats regarding longjmps as for hmR0SvmPreRunGuest apply.
3947 * @sa hmR0SvmPreRunGuest.
3948 */
3949static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3950{
3951 HMSVM_ASSERT_PREEMPT_SAFE();
3952
3953 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3954 {
3955#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
3956 Log2(("hmR0SvmPreRunGuestNested: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
3957 return VINF_EM_RESCHEDULE_REM;
3958#endif
3959 }
3960 else
3961 return VINF_SVM_VMEXIT;
3962
3963 /* Check force flag actions that might require us to go back to ring-3. */
3964 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
3965 if (rc != VINF_SUCCESS)
3966 return rc;
3967
3968 if (TRPMHasTrap(pVCpu))
3969 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
3970 else if (!pVCpu->hm.s.Event.fPending)
3971 {
3972 VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu, pCtx);
3973 if (rcStrict != VINF_SUCCESS)
3974 return VBOXSTRICTRC_VAL(rcStrict);
3975 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3976 return VINF_SVM_VMEXIT;
3977 }
3978
3979 /*
3980 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
3981 * Just do it in software, see @bugref{8411}.
3982 * NB: If we could continue a task switch exit we wouldn't need to do this.
3983 */
3984 if (RT_UNLIKELY( !pVM->hm.s.svm.u32Features
3985 && pVCpu->hm.s.Event.fPending
3986 && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
3987 {
3988 return VINF_EM_RAW_INJECT_TRPM_EVENT;
3989 }
3990
3991#ifdef HMSVM_SYNC_FULL_NESTED_GUEST_STATE
3992 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
3993#endif
3994
3995 /*
3996 * Load the nested-guest state.
3997 */
3998 rc = hmR0SvmLoadGuestStateNested(pVCpu, pCtx);
3999 AssertRCReturn(rc, rc);
4000 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); /** @todo Get new STAM counter for this? */
4001
4002 /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware SVM. */
4003 Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
4004
4005 /*
4006 * No longjmps to ring-3 from this point on!!!
4007 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
4008 * This also disables flushing of the R0-logger instance (if any).
4009 */
4010 VMMRZCallRing3Disable(pVCpu);
4011
4012 /*
4013 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
4014 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
4015 *
4016 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
4017 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
4018 *
4019 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
4020 * executing guest code.
4021 */
4022 pSvmTransient->fEFlags = ASMIntDisableFlags();
4023 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4024 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4025 {
4026 ASMSetFlags(pSvmTransient->fEFlags);
4027 VMMRZCallRing3Enable(pVCpu);
4028 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4029 return VINF_EM_RAW_TO_R3;
4030 }
4031 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4032 {
4033 ASMSetFlags(pSvmTransient->fEFlags);
4034 VMMRZCallRing3Enable(pVCpu);
4035 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
4036 return VINF_EM_RAW_INTERRUPT;
4037 }
4038
4039 /*
4040 * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
4041 * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
4042 * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
4043 *
4044 * With VT-x, this is handled by the guest interruptibility-state VMCS field: the CPU sets it after
4045 * actually delivering the NMI, and we read it on VM-exit to determine the state.
4046 */
4047 if (pVCpu->hm.s.Event.fPending)
4048 {
4049 SVMEVENT Event;
4050 Event.u = pVCpu->hm.s.Event.u64IntInfo;
4051 if ( Event.n.u1Valid
4052 && Event.n.u3Type == SVM_EVENT_NMI
4053 && Event.n.u8Vector == X86_XCPT_NMI
4054 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
4055 {
4056 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
4057 }
4058 }
4059
4060 return VINF_SUCCESS;
4061}
4062#endif
4063
4064
4065/**
4066 * Does the preparations before executing guest code in AMD-V.
4067 *
4068 * This may cause longjmps to ring-3 and may even result in rescheduling to the
4069 * recompiler. We must be cautious what we do here regarding committing
4070 * guest-state information into the VMCB assuming we assuredly execute the guest
4071 * in AMD-V. If we fall back to the recompiler after updating the VMCB and
4072 * clearing the common-state (TRPM/forceflags), we must undo those changes so
4073 * that the recompiler can (and should) use them when it resumes guest
4074 * execution. Otherwise such operations must be done when we can no longer
4075 * exit to ring-3.
4076 *
4077 * @returns VBox status code (informational status codes included).
4078 * @retval VINF_SUCCESS if we can proceed with running the guest.
4079 * @retval VINF_* scheduling changes, we have to go back to ring-3.
4080 *
4081 * @param pVM The cross context VM structure.
4082 * @param pVCpu The cross context virtual CPU structure.
4083 * @param pCtx Pointer to the guest-CPU context.
4084 * @param pSvmTransient Pointer to the SVM transient structure.
4085 */
4086static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4087{
4088 HMSVM_ASSERT_PREEMPT_SAFE();
4089 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
4090
4091 /* Check force flag actions that might require us to go back to ring-3. */
4092 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
4093 if (rc != VINF_SUCCESS)
4094 return rc;
4095
4096 if (TRPMHasTrap(pVCpu))
4097 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
4098 else if (!pVCpu->hm.s.Event.fPending)
4099 hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
4100
4101 /*
4102 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
4103 * Just do it in software, see @bugref{8411}.
4104 * NB: If we could continue a task switch exit we wouldn't need to do this.
4105 */
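    /* Bits 10:8 of u64IntInfo hold the event type; this open-coded extraction matches what
       SVM_EVENT_GET_TYPE() does in the nested-guest path above. */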
4106 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
4107 if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
4108 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4109
4110#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4111 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
4112#endif
4113
4114 /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
4115 rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
4116 AssertRCReturn(rc, rc);
4117 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
4118
4119 /*
4120 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
4121 * so we can update it on the way back if the guest changed the TPR.
4122 */
4123 if (pVCpu->hm.s.svm.fSyncVTpr)
4124 {
4125 if (pVM->hm.s.fTPRPatchingActive)
4126 pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
4127 else
4128 {
4129 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
4130 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
4131 }
4132 }
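
    /* Note on the block above: when TPR patching is active, guest TPR accesses are presumably
       redirected to the LSTAR MSR by the patched code, which is why msrLSTAR rather than the
       VMCB's V_TPR is sampled in that case. */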
4133
4134 /*
4135 * No longjmps to ring-3 from this point on!!!
4136 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
4137 * This also disables flushing of the R0-logger instance (if any).
4138 */
4139 VMMRZCallRing3Disable(pVCpu);
4140
4141 /*
4142 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
4143 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
4144 *
4145 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
4146 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
4147 *
4148 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
4149 * executing guest code.
4150 */
4151 pSvmTransient->fEFlags = ASMIntDisableFlags();
4152 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4153 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4154 {
4155 ASMSetFlags(pSvmTransient->fEFlags);
4156 VMMRZCallRing3Enable(pVCpu);
4157 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4158 return VINF_EM_RAW_TO_R3;
4159 }
4160 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4161 {
4162 ASMSetFlags(pSvmTransient->fEFlags);
4163 VMMRZCallRing3Enable(pVCpu);
4164 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
4165 return VINF_EM_RAW_INTERRUPT;
4166 }
4167
4168 /*
4169 * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
4170 * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
4171 * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
4172 *
4173 * With VT-x, this is handled by the guest interruptibility-state VMCS field: the CPU sets it after
4174 * actually delivering the NMI, and we read it on VM-exit to determine the state.
4175 */
4176 if (pVCpu->hm.s.Event.fPending)
4177 {
4178 SVMEVENT Event;
4179 Event.u = pVCpu->hm.s.Event.u64IntInfo;
4180 if ( Event.n.u1Valid
4181 && Event.n.u3Type == SVM_EVENT_NMI
4182 && Event.n.u8Vector == X86_XCPT_NMI
4183 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
4184 {
4185 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
4186 }
4187 }
4188
4189 return VINF_SUCCESS;
4190}
4191
4192
4193#ifdef VBOX_WITH_NESTED_HWVIRT
4194/**
4195 * Prepares to run nested-guest code in AMD-V once we have committed to doing so. This
4196 * means there is no backing out to ring-3 or anywhere else at this point.
4197 *
4198 * @param pVM The cross context VM structure.
4199 * @param pVCpu The cross context virtual CPU structure.
4200 * @param pCtx Pointer to the guest-CPU context.
4201 * @param pSvmTransient Pointer to the SVM transient structure.
4202 *
4203 * @remarks Called with preemption disabled.
4204 * @remarks No-long-jump zone!!!
4205 */
4206static void hmR0SvmPreRunGuestCommittedNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4207{
4208 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4209 Assert(VMMR0IsLogFlushDisabled(pVCpu));
4210 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4211 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4212
4213 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4214 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4215
4216 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
4217 hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);
4218
4219 if ( pVCpu->hm.s.fPreloadGuestFpu
4220 && !CPUMIsGuestFPUStateActive(pVCpu))
4221 {
4222 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4223 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
4224 }
4225
4226 /* Load the state shared between host and nested-guest (FPU, debug). */
4227 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
4228 hmR0SvmLoadSharedState(pVCpu, pVmcbNstGst, pCtx);
4229
4230 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); /* Preemption might set this, nothing to do on AMD-V. */
4231 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
4232
4233 /* Setup TSC offsetting. */
4234 RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
4235 if ( pSvmTransient->fUpdateTscOffsetting
4236 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
4237 {
4238 hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
4239 pSvmTransient->fUpdateTscOffsetting = false;
4240 }
4241
4242 /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
4243 if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
4244 pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
4245
4246 /* Store status of the shared guest-host state at the time of VMRUN. */
4247#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4248 if (CPUMIsGuestInLongModeEx(pCtx))
4249 {
4250 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
4251 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
4252 }
4253 else
4254#endif
4255 {
4256 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4257 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4258 }
4259 pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
4260
4261 /* The TLB flushing would've already been setup by the nested-hypervisor. */
4262 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4263 hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcbNstGst);
4264 Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
4265
4266 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4267
4268 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
4269 to start executing. */
4270
4271 /*
4272 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
4273 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
4274 *
4275 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4276 */
4277 uint8_t *pbMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
4278 if ( (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
4279 && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4280 {
4281 hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4282 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4283 uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
4284 if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
4285 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
4286 pSvmTransient->fRestoreTscAuxMsr = true;
4287 }
4288 else
4289 {
4290 hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4291 pSvmTransient->fRestoreTscAuxMsr = false;
4292 }
4293
4294 /*
4295 * If VMCB Clean bits aren't supported by the CPU or aren't exposed to the guest,
4296 * mark all state-bits as dirty, indicating to the CPU to re-load everything from the VMCB.
4297 */
4298 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
4299 if (!fSupportsVmcbCleanBits)
4300 pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
4301}
4302#endif
4303
4304
4305/**
4306 * Prepares to run guest code in AMD-V once we have committed to doing so. This
4307 * means there is no backing out to ring-3 or anywhere else at this
4308 * point.
4309 *
4310 * @param pVM The cross context VM structure.
4311 * @param pVCpu The cross context virtual CPU structure.
4312 * @param pCtx Pointer to the guest-CPU context.
4313 * @param pSvmTransient Pointer to the SVM transient structure.
4314 *
4315 * @remarks Called with preemption disabled.
4316 * @remarks No-long-jump zone!!!
4317 */
4318static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4319{
4320 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4321 Assert(VMMR0IsLogFlushDisabled(pVCpu));
4322 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4323 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
4324
4325 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4326 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4327
4328 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
4329 hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
4330
4331 if ( pVCpu->hm.s.fPreloadGuestFpu
4332 && !CPUMIsGuestFPUStateActive(pVCpu))
4333 {
4334 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4335 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
4336 }
4337
4338 /* Load the state shared between host and guest (FPU, debug). */
4339 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
4340 hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
4341
4342 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); /* Preemption might set this, nothing to do on AMD-V. */
4343 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
4344
4345 /* Setup TSC offsetting. */
4346 RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
4347 if ( pSvmTransient->fUpdateTscOffsetting
4348 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
4349 {
4350 hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
4351 pSvmTransient->fUpdateTscOffsetting = false;
4352 }
4353
4354 /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
4355 if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
4356 pVmcb->ctrl.u32VmcbCleanBits = 0;
4357
4358 /* Store status of the shared guest-host state at the time of VMRUN. */
4359#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4360 if (CPUMIsGuestInLongModeEx(pCtx))
4361 {
4362 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
4363 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
4364 }
4365 else
4366#endif
4367 {
4368 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4369 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4370 }
4371 pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
4372
4373 /* Flush the appropriate tagged-TLB entries. */
4374 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4375 hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb);
4376 Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
4377
4378 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4379
4380 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
4381 to start executing. */
4382
4383 /*
4384 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
4385 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
4386 *
4387 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4388 */
4389 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
4390 if ( (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
4391 && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4392 {
4393 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4394 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4395 uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
4396 if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
4397 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
4398 pSvmTransient->fRestoreTscAuxMsr = true;
4399 }
4400 else
4401 {
4402 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4403 pSvmTransient->fRestoreTscAuxMsr = false;
4404 }
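
    /* The matching restore of the host TSC_AUX (when fRestoreTscAuxMsr is set) is done after
       the world switch, in hmR0SvmPostRunGuest(). */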
4405
4406 /* If VMCB Clean bits aren't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
4407 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
4408 if (!fSupportsVmcbCleanBits)
4409 pVmcb->ctrl.u32VmcbCleanBits = 0;
4410}
4411
4412
4413/**
4414 * Wrapper for running the guest code in AMD-V.
4415 *
4416 * @returns VBox strict status code.
4417 * @param pVM The cross context VM structure.
4418 * @param pVCpu The cross context virtual CPU structure.
4419 * @param pCtx Pointer to the guest-CPU context.
4420 *
4421 * @remarks No-long-jump zone!!!
4422 */
4423DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4424{
4425 /*
4426 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4427 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
4428 * Refer to MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4429 */
4430#ifdef VBOX_WITH_KERNEL_USING_XMM
4431 return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
4432 pVCpu->hm.s.svm.pfnVMRun);
4433#else
4434 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
4435#endif
4436}
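
/*
 * Note: hmR0SVMRunWrapXMM is presumably an assembly helper that saves and restores the
 * callee-saved XMM registers (XMM6-XMM15) around the call to pfnVMRun, addressing the
 * Windows x64 calling-convention issue described above.
 */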
4437
4438
4439#ifdef VBOX_WITH_NESTED_HWVIRT
4440/**
4441 * Wrapper for running the nested-guest code in AMD-V.
4442 *
4443 * @returns VBox strict status code.
4444 * @param pVM The cross context VM structure.
4445 * @param pVCpu The cross context virtual CPU structure.
4446 * @param pCtx Pointer to the guest-CPU context.
4447 *
4448 * @remarks No-long-jump zone!!!
4449 */
4450DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4451{
4452 /*
4453 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4454 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
4455 * Refer to MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4456 */
4457#ifdef VBOX_WITH_KERNEL_USING_XMM
4458 return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
4459 pVCpu->hm.s.svm.pfnVMRun);
4460#else
4461 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
4462#endif
4463}
4464
4465
4466/**
4467 * Performs some essential restoration of state after running nested-guest code in
4468 * AMD-V.
4469 *
4470 * @param pVM The cross context VM structure.
4471 * @param pVCpu The cross context virtual CPU structure.
4472 * @param pMixedCtx Pointer to the nested-guest-CPU context. The data may be
4473 * out-of-sync. Make sure to update the required fields
4474 * before using them.
4475 * @param pSvmTransient Pointer to the SVM transient structure.
4476 * @param rcVMRun Return code of VMRUN.
4477 *
4478 * @remarks Called with interrupts disabled.
4479 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4480 * unconditionally when it is safe to do so.
4481 */
4482static void hmR0SvmPostRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
4483{
4484 RT_NOREF(pVM);
4485 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4486
4487 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4488 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4489
4490 /* TSC read must be done early for maximum accuracy. */
4491 PSVMVMCB pVmcbNstGst = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
4492 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4493 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
4494 if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4495 {
4496 /*
4497 * Undo what we did in hmR0SvmUpdateTscOffsettingNested() but don't restore the
4498 * nested-guest VMCB TSC offset here. It shall eventually be restored on #VMEXIT
4499 * later by HMSvmNstGstVmExitNotify().
4500 */
4501 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
4502 }
4503
4504 if (pSvmTransient->fRestoreTscAuxMsr)
4505 {
4506 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4507 CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4508 if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
4509 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
4510 }
4511
4512 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
4513 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
4514 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4515
4516 Assert(!(ASMGetFlags() & X86_EFL_IF));
4517 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4518 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4519
4520 /* Mark the VMCB-state cache as unmodified by VMM. */
4521 pVmcbNstGstCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;
4522
4523 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4524 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4525 {
4526 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
4527 return;
4528 }
4529
4530 pSvmTransient->u64ExitCode = pVmcbNstGstCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
4531 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbNstGstCtrl->u64ExitCode);/* Update the #VMEXIT history array. */
4532 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4533 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4534
4535 Assert(!pVCpu->hm.s.svm.fSyncVTpr);
4536 hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst); /* Save the nested-guest state from the VMCB to the
4537 guest-CPU context. */
4538}
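/*
 * Illustrative sketch (not built): per the AMD-V spec, the TSC value the nested-guest reads
 * via RDTSC/RDTSCP (when not intercepted) is just the host TSC plus the offset programmed in
 * the VMCB, which is the relationship the TMCpuTickSetLastSeen() adjustment above relies on.
 * Hypothetical helper name, for illustration only.
 */
#if 0
DECLINLINE(uint64_t) svmSketchGuestTscFromHost(uint64_t uHostTsc, uint64_t uVmcbTscOffset)
{
    return uHostTsc + uVmcbTscOffset;   /* The offset is effectively a signed (2's complement) value. */
}
#endif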
4539#endif
4540
4541/**
4542 * Performs some essential restoration of state after running guest code in
4543 * AMD-V.
4544 *
4545 * @param pVM The cross context VM structure.
4546 * @param pVCpu The cross context virtual CPU structure.
4547 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
4548 * out-of-sync. Make sure to update the required fields
4549 * before using them.
4550 * @param pSvmTransient Pointer to the SVM transient structure.
4551 * @param rcVMRun Return code of VMRUN.
4552 *
4553 * @remarks Called with interrupts disabled.
4554 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4555 * unconditionally when it is safe to do so.
4556 */
4557static void hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
4558{
4559 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4560
4561 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4562 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4563
4564 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
4565 pVmcb->ctrl.u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
4566
4567 /* TSC read must be done early for maximum accuracy. */
4568 if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4569 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
4570
4571 if (pSvmTransient->fRestoreTscAuxMsr)
4572 {
4573 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4574 CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4575 if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
4576 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
4577 }
4578
4579 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
4580 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
4581 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4582
4583 Assert(!(ASMGetFlags() & X86_EFL_IF));
4584 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4585 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4586
4587 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4588 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4589 {
4590 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
4591 return;
4592 }
4593
4594 pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode; /* Save the #VMEXIT reason. */
4595 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcb->ctrl.u64ExitCode); /* Update the #VMEXIT history array. */
4596 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4597 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4598
4599 hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcb); /* Save the guest state from the VMCB to the guest-CPU context. */
4600
4601 if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
4602 {
4603 if (pVCpu->hm.s.svm.fSyncVTpr)
4604 {
4605            /* TPR patching (for 32-bit guests) uses the LSTAR MSR to hold the TPR value, otherwise the VTPR is used (see the V_TPR sketch after this function). */
4606 if ( pVM->hm.s.fTPRPatchingActive
4607 && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
4608 {
4609 int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
4610 AssertRC(rc);
4611 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4612 }
4613 else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
4614 {
4615 int rc = APICSetTpr(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
4616 AssertRC(rc);
4617 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
4618 }
4619 }
4620 }
4621}
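/*
 * Illustrative sketch (not built): how the TPR value synced back above relates to the VMCB
 * V_TPR field and, for TPR-patched 32-bit guests, to the low byte of the LSTAR MSR.
 * Hypothetical helper names, for illustration only.
 */
#if 0
DECLINLINE(uint8_t) svmSketchApicTprFromVTpr(uint8_t uVTpr)
{
    return (uint8_t)(uVTpr << 4);           /* V_TPR carries TPR[7:4]; e.g. V_TPR=0x3 -> APIC TPR=0x30. */
}

DECLINLINE(uint8_t) svmSketchApicTprFromLstar(uint64_t uGuestLstar)
{
    return (uint8_t)(uGuestLstar & 0xff);   /* TPR patching stashes the full 8-bit TPR in LSTAR[7:0]. */
}
#endif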
4622
4623
4624/**
4625 * Runs the guest code using AMD-V.
4626 *
4627 * @returns VBox status code.
4628 * @param pVM The cross context VM structure.
4629 * @param pVCpu The cross context virtual CPU structure.
4630 * @param pCtx Pointer to the guest-CPU context.
4631 * @param pcLoops Pointer to the number of executed loops.
4632 */
4633static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
4634{
4635 uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
4636 Assert(pcLoops);
4637 Assert(*pcLoops <= cMaxResumeLoops);
4638
4639 SVMTRANSIENT SvmTransient;
4640 SvmTransient.fUpdateTscOffsetting = true;
4641
4642 int rc = VERR_INTERNAL_ERROR_5;
4643 for (;;)
4644 {
4645 Assert(!HMR0SuspendPending());
4646 HMSVM_ASSERT_CPU_SAFE();
4647
4648        /* Preparatory work for running guest code; this may force us to return
4649 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4650 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4651 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
4652 if (rc != VINF_SUCCESS)
4653 break;
4654
4655 /*
4656 * No longjmps to ring-3 from this point on!!!
4657 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
4658 * This also disables flushing of the R0-logger instance (if any).
4659 */
4660 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
4661 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
4662
4663 /* Restore any residual host-state and save any bits shared between host
4664 and guest into the guest-CPU state. Re-enables interrupts! */
4665 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
4666
4667 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4668 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4669 {
4670 if (rc == VINF_SUCCESS)
4671 rc = VERR_SVM_INVALID_GUEST_STATE;
4672 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
4673 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
4674 break;
4675 }
4676
4677 /* Handle the #VMEXIT. */
4678 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4679 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
4680 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
4681 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
4682 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
4683 if (rc != VINF_SUCCESS)
4684 break;
4685 if (++(*pcLoops) >= cMaxResumeLoops)
4686 {
4687 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4688 rc = VINF_EM_RAW_INTERRUPT;
4689 break;
4690 }
4691 }
4692
4693 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4694 return rc;
4695}
4696
4697
4698/**
4699 * Runs the guest code using AMD-V in single step mode.
4700 *
4701 * @returns VBox status code.
4702 * @param pVM The cross context VM structure.
4703 * @param pVCpu The cross context virtual CPU structure.
4704 * @param pCtx Pointer to the guest-CPU context.
4705 * @param pcLoops Pointer to the number of executed loops.
4706 */
4707static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
4708{
4709 uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
4710 Assert(pcLoops);
4711 Assert(*pcLoops <= cMaxResumeLoops);
4712
4713 SVMTRANSIENT SvmTransient;
4714 SvmTransient.fUpdateTscOffsetting = true;
4715
4716 uint16_t uCsStart = pCtx->cs.Sel;
4717 uint64_t uRipStart = pCtx->rip;
4718
4719 int rc = VERR_INTERNAL_ERROR_5;
4720 for (;;)
4721 {
4722 Assert(!HMR0SuspendPending());
4723 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
4724 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
4725 (unsigned)RTMpCpuId(), *pcLoops));
4726
4727        /* Preparatory work for running guest code; this may force us to return
4728 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4729 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4730 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
4731 if (rc != VINF_SUCCESS)
4732 break;
4733
4734 /*
4735 * No longjmps to ring-3 from this point on!!!
4736 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
4737 * This also disables flushing of the R0-logger instance (if any).
4738 */
4739 VMMRZCallRing3Disable(pVCpu);
4740 VMMRZCallRing3RemoveNotification(pVCpu);
4741 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
4742
4743 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
4744
4745 /*
4746 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
4747 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
4748 */
4749 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
4750 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4751 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4752 {
4753 if (rc == VINF_SUCCESS)
4754 rc = VERR_SVM_INVALID_GUEST_STATE;
4755 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
4756 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
4757 return rc;
4758 }
4759
4760 /* Handle the #VMEXIT. */
4761 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4762 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
4763 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
4764 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
4765 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
4766 if (rc != VINF_SUCCESS)
4767 break;
4768 if (++(*pcLoops) >= cMaxResumeLoops)
4769 {
4770 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4771 rc = VINF_EM_RAW_INTERRUPT;
4772 break;
4773 }
4774
4775 /*
4776 * Did the RIP change, if so, consider it a single step.
4777 * Otherwise, make sure one of the TFs gets set.
4778 */
4779 if ( pCtx->rip != uRipStart
4780 || pCtx->cs.Sel != uCsStart)
4781 {
4782 rc = VINF_EM_DBG_STEPPED;
4783 break;
4784 }
4785 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
4786 }
4787
4788 /*
4789 * Clear the X86_EFL_TF if necessary.
4790 */
4791 if (pVCpu->hm.s.fClearTrapFlag)
4792 {
4793 pVCpu->hm.s.fClearTrapFlag = false;
4794 pCtx->eflags.Bits.u1TF = 0;
4795 }
4796
4797 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4798 return rc;
4799}
4800
4801#ifdef VBOX_WITH_NESTED_HWVIRT
4802/**
4803 * Runs the nested-guest code using AMD-V.
4804 *
4805 * @returns VBox status code.
4806 * @param pVM The cross context VM structure.
4807 * @param pVCpu The cross context virtual CPU structure.
4808 * @param pCtx Pointer to the guest-CPU context.
4809 * @param pcLoops Pointer to the number of executed loops. If we're switching
4810 * from the guest-code execution loop to this nested-guest
4811 * execution loop pass the remainder value, else pass 0.
4812 */
4813static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
4814{
4815 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4816 Assert(pcLoops);
4817 Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
4818
4819 SVMTRANSIENT SvmTransient;
4820 SvmTransient.fUpdateTscOffsetting = true;
4821
4822 int rc = VERR_INTERNAL_ERROR_4;
4823 for (;;)
4824 {
4825 Assert(!HMR0SuspendPending());
4826 HMSVM_ASSERT_CPU_SAFE();
4827
4828        /* Preparatory work for running nested-guest code; this may force us to return
4829 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4830 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4831 rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
4832 if ( rc != VINF_SUCCESS
4833 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4834 {
4835 break;
4836 }
4837
4838 /*
4839 * No longjmps to ring-3 from this point on!!!
4840 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
4841 * This also disables flushing of the R0-logger instance (if any).
4842 */
4843 hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
4844
4845 rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
4846
4847 /* Restore any residual host-state and save any bits shared between host
4848 and guest into the guest-CPU state. Re-enables interrupts! */
4849 hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);
4850
4851 if (RT_LIKELY( rc == VINF_SUCCESS
4852 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
4853 { /* extremely likely */ }
4854 else
4855 {
4856 /* VMRUN failed, shouldn't really happen, Guru. */
4857 if (rc != VINF_SUCCESS)
4858 break;
4859
4860 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
4861 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
4862 rc = VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0));
4863 break;
4864 }
4865
4866 /* Handle the #VMEXIT. */
4867 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4868 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
4869 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
4870 rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
4871 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
4872 if (rc != VINF_SUCCESS)
4873 break;
4874 if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
4875 {
4876 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4877 rc = VINF_EM_RAW_INTERRUPT;
4878 break;
4879 }
4880
4881 /** @todo handle single-stepping */
4882 }
4883
4884 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4885 return rc;
4886}
4887#endif
4888
4889
4890/**
4891 * Runs the guest code using AMD-V.
4892 *
4893 * @returns Strict VBox status code.
4894 * @param pVM The cross context VM structure.
4895 * @param pVCpu The cross context virtual CPU structure.
4896 * @param pCtx Pointer to the guest-CPU context.
4897 */
4898VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4899{
4900 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4901 HMSVM_ASSERT_PREEMPT_SAFE();
4902 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
4903
4904 uint32_t cLoops = 0;
4905 int rc;
4906#ifdef VBOX_WITH_NESTED_HWVIRT
4907 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4908#endif
4909 {
4910 if (!pVCpu->hm.s.fSingleInstruction)
4911 rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
4912 else
4913 rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
4914 }
4915#ifdef VBOX_WITH_NESTED_HWVIRT
4916 else
4917 {
4918 rc = VINF_SVM_VMRUN;
4919 }
4920
4921 /* Re-check the nested-guest condition here as we may be transitioning from the normal
4922 execution loop into the nested-guest, hence this is not placed in the 'else' part above. */
4923 if (rc == VINF_SVM_VMRUN)
4924 {
4925 rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
4926 if (rc == VINF_SVM_VMEXIT)
4927 rc = VINF_SUCCESS;
4928 }
4929#endif
4930
4931 /* Fixup error codes. */
4932 if (rc == VERR_EM_INTERPRETER)
4933 rc = VINF_EM_RAW_EMULATE_INSTR;
4934 else if (rc == VINF_EM_RESET)
4935 rc = VINF_EM_TRIPLE_FAULT;
4936
4937 /* Prepare to return to ring-3. This will remove longjmp notifications. */
4938 rc = hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
4939 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
4940 return rc;
4941}
4942
4943
4944#ifdef VBOX_WITH_NESTED_HWVIRT
4945/**
4946 * Determines whether an IOIO intercept is active for the nested-guest or not.
4947 *
4948 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
4949 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO.
4950 */
4951static bool hmR0SvmIsIoInterceptActive(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
4952{
4953 const uint16_t u16Port = pIoExitInfo->n.u16Port;
4954 const SVMIOIOTYPE enmIoType = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
4955 const uint8_t cbReg = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
4956 const uint8_t cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
4957 const uint8_t iEffSeg = pIoExitInfo->n.u3SEG;
4958 const bool fRep = pIoExitInfo->n.u1REP;
4959 const bool fStrIo = pIoExitInfo->n.u1STR;
4960
4961 return HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
4962 NULL /* pIoExitInfo */);
4963}
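/*
 * Illustrative sketch (not built): the I/O permission bitmap check done by
 * HMSvmIsIOInterceptActive() essentially tests one bit per I/O port for every byte the access
 * touches (see the AMD-V spec. on the I/O permissions map). Simplified, hypothetical helper
 * that ignores the string/REP refinements handled by the real function.
 */
#if 0
static bool svmSketchIsIoPortIntercepted(uint8_t const *pbIopm, uint16_t u16Port, uint8_t cbAccess)
{
    for (uint8_t i = 0; i < cbAccess; i++)
    {
        uint32_t const iBit = (uint32_t)u16Port + i;        /* Bit index equals the port number. */
        if (pbIopm[iBit >> 3] & (1 << (iBit & 7)))
            return true;                                    /* Any set bit -> #VMEXIT(IOIO). */
    }
    return false;
}
#endif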
4964
4965
4966/**
4967 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
4968 * SVM_EXIT_INVALID).
4969 *
4970 * @returns VBox status code (informational status codes included).
4971 * @param pVCpu The cross context virtual CPU structure.
4972 * @param pCtx Pointer to the guest-CPU context.
4973 * @param pSvmTransient Pointer to the SVM transient structure.
4974 */
4975static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4976{
4977 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4978 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
4979 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
4980
4981#define HM_SVM_VMEXIT_NESTED(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4982 VBOXSTRICTRC_TODO(IEMExecSvmVmexit(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2))
4983
4984 /*
4985 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected
4986 * by the nested-guest. If it isn't, it should be handled by the (outer) guest.
4987 */
4988 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
4989 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4990 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
4991 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1;
4992 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;
4993
4994 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
4995 switch (uExitCode)
4996 {
4997 case SVM_EXIT_CPUID:
4998 {
4999 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
5000 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5001 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
5002 }
5003
5004 case SVM_EXIT_RDTSC:
5005 {
5006 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
5007 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5008 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
5009 }
5010
5011 case SVM_EXIT_RDTSCP:
5012 {
5013 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
5014 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5015 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
5016 }
5017
5018
5019 case SVM_EXIT_MONITOR:
5020 {
5021 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
5022 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5023 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
5024 }
5025
5026 case SVM_EXIT_MWAIT:
5027 {
5028 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
5029 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5030 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
5031 }
5032
5033 case SVM_EXIT_HLT:
5034 {
5035 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
5036 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5037 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
5038 }
5039
5040 case SVM_EXIT_MSR:
5041 {
5042 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
5043 {
5044 uint32_t const idMsr = pCtx->ecx;
5045 uint16_t offMsrpm;
5046 uint32_t uMsrpmBit;
5047 int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
5048 if (RT_SUCCESS(rc))
5049 {
5050 void const *pvMsrBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
5051 bool const fInterceptRead = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
5052 bool const fInterceptWrite = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
5053
5054 if ( (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
5055 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
5056 {
5057 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5058 }
5059 }
5060 else
5061 {
5062 /*
5063                     * MSRs not covered by the MSRPM automatically cause a #VMEXIT; see the MSRPM
5064                     * layout sketch after this function and the AMD-V spec. "15.11 MSR Intercepts".
5065 */
5066 Assert(rc == VERR_OUT_OF_RANGE);
5067 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5068 }
5069 }
5070 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
5071 }
5072
5073 case SVM_EXIT_IOIO:
5074 {
5075 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
5076 {
5077 void *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
5078 SVMIOIOEXITINFO IoExitInfo;
5079 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
5080 bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo);
5081 if (fIntercept)
5082 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5083 }
5084 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
5085 }
5086
5087 case SVM_EXIT_EXCEPTION_14: /* X86_XCPT_PF */
5088 {
5089 PVM pVM = pVCpu->CTX_SUFF(pVM);
5090 if (pVM->hm.s.fNestedPaging)
5091 {
5092 uint32_t const u32ErrCode = pVmcbNstGstCtrl->u64ExitInfo1;
5093 uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
5094
5095 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
5096 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
5097 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
5098
5099 /* If the nested-guest is not intercepting #PFs, forward the #PF to the nested-guest. */
5100 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
5101 return VINF_SUCCESS;
5102 }
5103            return hmR0SvmExitXcptPFNested(pVCpu, pCtx, pSvmTransient);
5104 }
5105
5106 case SVM_EXIT_EXCEPTION_7: /* X86_XCPT_NM */
5107 {
5108 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_NM))
5109 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5110 hmR0SvmSetPendingXcptNM(pVCpu);
5111 return VINF_SUCCESS;
5112 }
5113
5114 case SVM_EXIT_EXCEPTION_6: /* X86_XCPT_UD */
5115 {
5116 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
5117 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5118 hmR0SvmSetPendingXcptUD(pVCpu);
5119 return VINF_SUCCESS;
5120 }
5121
5122 case SVM_EXIT_EXCEPTION_16: /* X86_XCPT_MF */
5123 {
5124 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
5125 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5126 hmR0SvmSetPendingXcptMF(pVCpu);
5127 return VINF_SUCCESS;
5128 }
5129
5130 case SVM_EXIT_EXCEPTION_1: /* X86_XCPT_DB */
5131 {
5132 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
5133 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5134 return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient);
5135 }
5136
5137 case SVM_EXIT_EXCEPTION_17: /* X86_XCPT_AC */
5138 {
5139 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
5140 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5141 return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
5142 }
5143
5144 case SVM_EXIT_EXCEPTION_3: /* X86_XCPT_BP */
5145 {
5146 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
5147 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5148 return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient);
5149 }
5150
5151 case SVM_EXIT_READ_CR0:
5152 case SVM_EXIT_READ_CR3:
5153 case SVM_EXIT_READ_CR4:
5154 {
5155 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
5156 if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
5157 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5158 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
5159 }
5160
5161 case SVM_EXIT_WRITE_CR0:
5162 case SVM_EXIT_WRITE_CR3:
5163 case SVM_EXIT_WRITE_CR4:
5164 case SVM_EXIT_WRITE_CR8: /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?? */
5165 {
5166 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5167 Log4(("hmR0SvmHandleExitNested: Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
5168
5169 if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
5170 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5171 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
5172 }
5173
5174 case SVM_EXIT_PAUSE:
5175 {
5176 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
5177 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5178 return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
5179 }
5180
5181 case SVM_EXIT_VINTR:
5182 {
5183 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
5184 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5185 return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
5186 }
5187
5188 case SVM_EXIT_INTR:
5189 case SVM_EXIT_NMI:
5190 case SVM_EXIT_SMI:
5191 {
5192 /*
5193             * We shouldn't direct physical interrupts, NMIs or SMIs to the nested-guest.
5194             *
5195             * Although we don't intercept SMIs, the nested-guest might. Therefore, we
5196             * might get an SMI #VMEXIT here, so simply ignore it rather than causing a
5197 * corresponding nested-guest #VMEXIT.
5198 */
5199 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
5200 }
5201
5202 case SVM_EXIT_FERR_FREEZE:
5203 {
5204 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
5205 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5206 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
5207 }
5208
5209 case SVM_EXIT_INVLPG:
5210 {
5211 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
5212 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5213 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
5214 }
5215
5216 case SVM_EXIT_WBINVD:
5217 {
5218 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
5219 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5220 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
5221 }
5222
5223 case SVM_EXIT_INVD:
5224 {
5225 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
5226 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5227 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
5228 }
5229
5230 case SVM_EXIT_RDPMC:
5231 {
5232 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
5233 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5234 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
5235 }
5236
5237 default:
5238 {
5239 switch (uExitCode)
5240 {
5241 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5242 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5243 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5244 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5245 {
5246 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
5247 if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
5248 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5249 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
5250 }
5251
5252 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5253 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5254 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5255 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5256 {
5257 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
5258 if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
5259 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5260 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
5261 }
5262
5263 /* The exceptions not handled here are already handled individually above (as they occur more frequently). */
5264 case SVM_EXIT_EXCEPTION_0: /*case SVM_EXIT_EXCEPTION_1:*/ case SVM_EXIT_EXCEPTION_2:
5265 /*case SVM_EXIT_EXCEPTION_3:*/ case SVM_EXIT_EXCEPTION_4: case SVM_EXIT_EXCEPTION_5:
5266 /*case SVM_EXIT_EXCEPTION_6:*/ /*case SVM_EXIT_EXCEPTION_7:*/ case SVM_EXIT_EXCEPTION_8:
5267 case SVM_EXIT_EXCEPTION_9: case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11:
5268 case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13: /*case SVM_EXIT_EXCEPTION_14:*/
5269 case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: /*case SVM_EXIT_EXCEPTION_17:*/
5270 case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_20:
5271 case SVM_EXIT_EXCEPTION_21: case SVM_EXIT_EXCEPTION_22: case SVM_EXIT_EXCEPTION_23:
5272 case SVM_EXIT_EXCEPTION_24: case SVM_EXIT_EXCEPTION_25: case SVM_EXIT_EXCEPTION_26:
5273 case SVM_EXIT_EXCEPTION_27: case SVM_EXIT_EXCEPTION_28: case SVM_EXIT_EXCEPTION_29:
5274 case SVM_EXIT_EXCEPTION_30: case SVM_EXIT_EXCEPTION_31:
5275 {
5276 uint8_t const uVector = uExitCode - SVM_EXIT_EXCEPTION_0;
5277 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
5278 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5279 return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);
5280 }
5281
5282 case SVM_EXIT_XSETBV:
5283 {
5284 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
5285 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5286 return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
5287 }
5288
5289 case SVM_EXIT_TASK_SWITCH:
5290 {
5291 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
5292 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5293 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
5294 }
5295
5296 case SVM_EXIT_IRET:
5297 {
5298 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
5299 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5300 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
5301 }
5302
5303 case SVM_EXIT_SHUTDOWN:
5304 {
5305 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
5306 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5307 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
5308 }
5309
5310 case SVM_EXIT_VMMCALL:
5311 {
5312 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
5313 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5314 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
5315 }
5316
5317 case SVM_EXIT_CLGI:
5318 {
5319 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
5320 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5321 return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
5322 }
5323
5324 case SVM_EXIT_STGI:
5325 {
5326 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
5327 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5328 return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
5329 }
5330
5331 case SVM_EXIT_VMLOAD:
5332 {
5333 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
5334 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5335 return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
5336 }
5337
5338 case SVM_EXIT_VMSAVE:
5339 {
5340 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
5341 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5342 return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
5343 }
5344
5345 case SVM_EXIT_INVLPGA:
5346 {
5347 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
5348 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5349 return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
5350 }
5351
5352 case SVM_EXIT_VMRUN:
5353 {
5354 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
5355 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5356 return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
5357 }
5358
5359 case SVM_EXIT_RSM:
5360 {
5361 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
5362 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5363 hmR0SvmSetPendingXcptUD(pVCpu);
5364 return VINF_SUCCESS;
5365 }
5366
5367 case SVM_EXIT_SKINIT:
5368 {
5369 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
5370 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5371 hmR0SvmSetPendingXcptUD(pVCpu);
5372 return VINF_SUCCESS;
5373 }
5374
5375 case SVM_EXIT_INIT: /* We shouldn't get INIT signals while executing a nested-guest. */
5376 case SVM_EXIT_NPF: /* We don't yet support nested-paging for nested-guests, so this should never happen. */
5377 {
5378 return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
5379 }
5380
5381 default:
5382 {
5383 AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
5384 pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
5385 return VERR_SVM_UNKNOWN_EXIT;
5386 }
5387 }
5388 }
5389 }
5390 /* not reached */
5391
5392#undef HM_SVM_VMEXIT_NESTED
5393}
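/*
 * Illustrative sketch (not built): the MSR permission bitmap layout consulted via
 * HMSvmGetMsrpmOffsetAndBit() in the SVM_EXIT_MSR case above, as described by the AMD-V spec.
 * ("15.11 MSR Intercepts"): two consecutive bits per MSR (read bit, then write bit) in three
 * 2 KB vectors; MSRs outside those ranges always cause a #VMEXIT. Hypothetical helper name.
 */
#if 0
static int svmSketchMsrpmReadBit(uint32_t idMsr, uint32_t *piReadBit)
{
    if (idMsr <= UINT32_C(0x00001fff))                              /* Vector 1: bytes 0x000..0x7ff. */
        *piReadBit = idMsr * 2;
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))      /* Vector 2: bytes 0x800..0xfff. */
        *piReadBit = 0x800 * 8 + (idMsr - UINT32_C(0xc0000000)) * 2;
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))      /* Vector 3: bytes 0x1000..0x17ff. */
        *piReadBit = 0x1000 * 8 + (idMsr - UINT32_C(0xc0010000)) * 2;
    else
        return VERR_OUT_OF_RANGE;                                   /* Not covered: always intercepted. */
    return VINF_SUCCESS;                                            /* *piReadBit + 1 is the write bit. */
}
#endif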
5394#endif
5395
5396
5397/**
5398 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
5399 *
5400 * @returns VBox status code (informational status codes included).
5401 * @param pVCpu The cross context virtual CPU structure.
5402 * @param pCtx Pointer to the guest-CPU context.
5403 * @param pSvmTransient Pointer to the SVM transient structure.
5404 */
5405static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5406{
5407 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
5408 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
5409
5410 /*
5411 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
5412 * normal workloads (for some definition of "normal").
5413 */
5414 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
5415 switch (uExitCode)
5416 {
5417 case SVM_EXIT_NPF:
5418 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
5419
5420 case SVM_EXIT_IOIO:
5421 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
5422
5423 case SVM_EXIT_RDTSC:
5424 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
5425
5426 case SVM_EXIT_RDTSCP:
5427 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
5428
5429 case SVM_EXIT_CPUID:
5430 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
5431
5432 case SVM_EXIT_EXCEPTION_14: /* X86_XCPT_PF */
5433 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
5434
5435 case SVM_EXIT_EXCEPTION_7: /* X86_XCPT_NM */
5436 return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
5437
5438 case SVM_EXIT_EXCEPTION_6: /* X86_XCPT_UD */
5439 return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
5440
5441 case SVM_EXIT_EXCEPTION_16: /* X86_XCPT_MF */
5442 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
5443
5444 case SVM_EXIT_EXCEPTION_1: /* X86_XCPT_DB */
5445 return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
5446
5447 case SVM_EXIT_EXCEPTION_17: /* X86_XCPT_AC */
5448 return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
5449
5450 case SVM_EXIT_EXCEPTION_3: /* X86_XCPT_BP */
5451 return hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient);
5452
5453 case SVM_EXIT_MONITOR:
5454 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
5455
5456 case SVM_EXIT_MWAIT:
5457 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
5458
5459 case SVM_EXIT_HLT:
5460 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
5461
5462 case SVM_EXIT_READ_CR0:
5463 case SVM_EXIT_READ_CR3:
5464 case SVM_EXIT_READ_CR4:
5465 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
5466
5467 case SVM_EXIT_WRITE_CR0:
5468 case SVM_EXIT_WRITE_CR3:
5469 case SVM_EXIT_WRITE_CR4:
5470 case SVM_EXIT_WRITE_CR8:
5471 {
5472 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5473 Log4(("hmR0SvmHandleExit: Write CR%u\n", uCr)); NOREF(uCr);
5474 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
5475 }
5476
5477 case SVM_EXIT_PAUSE:
5478 return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
5479
5480 case SVM_EXIT_VMMCALL:
5481 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
5482
5483 case SVM_EXIT_VINTR:
5484 return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
5485
5486 case SVM_EXIT_INTR:
5487 case SVM_EXIT_FERR_FREEZE:
5488 case SVM_EXIT_NMI:
5489 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
5490
5491 case SVM_EXIT_MSR:
5492 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
5493
5494 case SVM_EXIT_INVLPG:
5495 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
5496
5497 case SVM_EXIT_WBINVD:
5498 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
5499
5500 case SVM_EXIT_INVD:
5501 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
5502
5503 case SVM_EXIT_RDPMC:
5504 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
5505
5506 default:
5507 {
5508 switch (pSvmTransient->u64ExitCode)
5509 {
5510 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5511 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5512 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5513 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5514 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
5515
5516 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5517 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5518 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5519 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5520 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
5521
5522 case SVM_EXIT_XSETBV:
5523 return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
5524
5525 case SVM_EXIT_TASK_SWITCH:
5526 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
5527
5528 case SVM_EXIT_IRET:
5529 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
5530
5531 case SVM_EXIT_SHUTDOWN:
5532 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
5533
5534 case SVM_EXIT_SMI:
5535 case SVM_EXIT_INIT:
5536 {
5537 /*
5538                      * We don't intercept SMIs. As for INIT signals, they really shouldn't ever happen here.
5539                      * If one ever does, we want to know about it, so log the exit code and bail.
5540 */
5541 return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
5542 }
5543
5544#ifdef VBOX_WITH_NESTED_HWVIRT
5545 case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
5546 case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
5547 case SVM_EXIT_VMLOAD: return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
5548 case SVM_EXIT_VMSAVE: return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
5549 case SVM_EXIT_INVLPGA: return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
5550 case SVM_EXIT_VMRUN: return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
5551#else
5552 case SVM_EXIT_CLGI:
5553 case SVM_EXIT_STGI:
5554 case SVM_EXIT_VMLOAD:
5555 case SVM_EXIT_VMSAVE:
5556 case SVM_EXIT_INVLPGA:
5557 case SVM_EXIT_VMRUN:
5558#endif
5559 case SVM_EXIT_RSM:
5560 case SVM_EXIT_SKINIT:
5561 {
5562 hmR0SvmSetPendingXcptUD(pVCpu);
5563 return VINF_SUCCESS;
5564 }
5565
5566#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
5567 case SVM_EXIT_EXCEPTION_0: /* X86_XCPT_DE */
5568 /* SVM_EXIT_EXCEPTION_1: */ /* X86_XCPT_DB - Handled above. */
5569 case SVM_EXIT_EXCEPTION_2: /* X86_XCPT_NMI */
5570 /* SVM_EXIT_EXCEPTION_3: */ /* X86_XCPT_BP - Handled above. */
5571 case SVM_EXIT_EXCEPTION_4: /* X86_XCPT_OF */
5572 case SVM_EXIT_EXCEPTION_5: /* X86_XCPT_BR */
5573 /* SVM_EXIT_EXCEPTION_6: */ /* X86_XCPT_UD - Handled above. */
5574 /* SVM_EXIT_EXCEPTION_7: */ /* X86_XCPT_NM - Handled above. */
5575 case SVM_EXIT_EXCEPTION_8: /* X86_XCPT_DF */
5576 case SVM_EXIT_EXCEPTION_9: /* X86_XCPT_CO_SEG_OVERRUN */
5577 case SVM_EXIT_EXCEPTION_10: /* X86_XCPT_TS */
5578 case SVM_EXIT_EXCEPTION_11: /* X86_XCPT_NP */
5579 case SVM_EXIT_EXCEPTION_12: /* X86_XCPT_SS */
5580 case SVM_EXIT_EXCEPTION_13: /* X86_XCPT_GP */
5581 /* SVM_EXIT_EXCEPTION_14: */ /* X86_XCPT_PF - Handled above. */
5582 case SVM_EXIT_EXCEPTION_15: /* Reserved. */
5583 /* SVM_EXIT_EXCEPTION_16: */ /* X86_XCPT_MF - Handled above. */
5584 /* SVM_EXIT_EXCEPTION_17: */ /* X86_XCPT_AC - Handled above. */
5585 case SVM_EXIT_EXCEPTION_18: /* X86_XCPT_MC */
5586 case SVM_EXIT_EXCEPTION_19: /* X86_XCPT_XF */
5587 case SVM_EXIT_EXCEPTION_20: case SVM_EXIT_EXCEPTION_21: case SVM_EXIT_EXCEPTION_22:
5588 case SVM_EXIT_EXCEPTION_23: case SVM_EXIT_EXCEPTION_24: case SVM_EXIT_EXCEPTION_25:
5589 case SVM_EXIT_EXCEPTION_26: case SVM_EXIT_EXCEPTION_27: case SVM_EXIT_EXCEPTION_28:
5590 case SVM_EXIT_EXCEPTION_29: case SVM_EXIT_EXCEPTION_30: case SVM_EXIT_EXCEPTION_31:
5591 return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);
5592#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
5593
5594 default:
5595 {
5596 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
5597 pVCpu->hm.s.u32HMError = uExitCode;
5598 return VERR_SVM_UNKNOWN_EXIT;
5599 }
5600 }
5601 }
5602 }
5603 /* not reached */
5604}
5605
5606
5607#ifdef DEBUG
5608/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5609# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
5610 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5611
5612# define HMSVM_ASSERT_PREEMPT_CPUID() \
5613 do \
5614 { \
5615 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5616 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5617 } while (0)
5618
5619# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
5620 do { \
5621 AssertPtr(pVCpu); \
5622 AssertPtr(pCtx); \
5623 AssertPtr(pSvmTransient); \
5624 Assert(ASMIntAreEnabled()); \
5625 HMSVM_ASSERT_PREEMPT_SAFE(); \
5626 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
5627 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
5628 HMSVM_ASSERT_PREEMPT_SAFE(); \
5629 if (VMMR0IsLogFlushDisabled(pVCpu)) \
5630 HMSVM_ASSERT_PREEMPT_CPUID(); \
5631 } while (0)
5632#else /* Release builds */
5633# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { NOREF(pVCpu); NOREF(pCtx); NOREF(pSvmTransient); } while (0)
5634#endif
5635
5636
5637/**
5638 * Worker for hmR0SvmInterpretInvlpg().
5639 *
5640 * @return VBox status code.
5641 * @param pVCpu The cross context virtual CPU structure.
5642 * @param pCpu Pointer to the disassembler state.
5643 * @param pCtx The guest CPU context.
5644 */
5645static int hmR0SvmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTX pCtx)
5646{
5647 DISQPVPARAMVAL Param1;
5648 RTGCPTR GCPtrPage;
5649
5650 int rc = DISQueryParamVal(CPUMCTX2CORE(pCtx), pCpu, &pCpu->Param1, &Param1, DISQPVWHICH_SRC);
5651 if (RT_FAILURE(rc))
5652 return VERR_EM_INTERPRETER;
5653
5654 if ( Param1.type == DISQPV_TYPE_IMMEDIATE
5655 || Param1.type == DISQPV_TYPE_ADDRESS)
5656 {
5657 if (!(Param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64)))
5658 return VERR_EM_INTERPRETER;
5659
5660 GCPtrPage = Param1.val.val64;
5661 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), GCPtrPage);
5662 rc = VBOXSTRICTRC_VAL(rc2);
5663 }
5664 else
5665 {
5666 Log4(("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type));
5667 rc = VERR_EM_INTERPRETER;
5668 }
5669
5670 return rc;
5671}
5672
5673
5674/**
5675 * Interprets INVLPG.
5676 *
5677 * @returns VBox status code.
5678 * @retval VINF_* Scheduling instructions.
5679 * @retval VERR_EM_INTERPRETER Something we can't cope with.
5680 * @retval VERR_* Fatal errors.
5681 *
5682 * @param pVM The cross context VM structure.
5683 * @param pVCpu The cross context virtual CPU structure.
5684 * @param pCtx The guest CPU context.
5685 *
5686 * @remarks Updates the RIP if the instruction was executed successfully.
5687 */
5688static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
5689{
5690 /* Only allow 32 & 64 bit code. */
5691 if (CPUMGetGuestCodeBits(pVCpu) != 16)
5692 {
5693 PDISSTATE pDis = &pVCpu->hm.s.DisState;
5694 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
5695 if ( RT_SUCCESS(rc)
5696 && pDis->pCurInstr->uOpcode == OP_INVLPG)
5697 {
5698 rc = hmR0SvmInterpretInvlPgEx(pVCpu, pDis, pCtx);
5699 if (RT_SUCCESS(rc))
5700 pCtx->rip += pDis->cbInstr;
5701 return rc;
5702 }
5703 else
5704 Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
5705 }
5706 return VERR_EM_INTERPRETER;
5707}
5708
5709
5710#ifdef HMSVM_USE_IEM_EVENT_REFLECTION
5711/**
5712 * Gets the IEM exception flags for the specified SVM event.
5713 *
5714 * @returns The IEM exception flags.
5715 * @param pEvent Pointer to the SVM event.
5716 *
5717 * @remarks This function currently only constructs flags required for
5718 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
5719 * and CR2 aspects of an exception are not included).
5720 */
5721static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
5722{
5723 uint8_t const uEventType = pEvent->n.u3Type;
5724 uint32_t fIemXcptFlags;
5725 switch (uEventType)
5726 {
5727 case SVM_EVENT_EXCEPTION:
5728 /*
5729 * Only INT3 and INTO instructions can raise #BP and #OF exceptions.
5730 * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
5731 */
5732 if (pEvent->n.u8Vector == X86_XCPT_BP)
5733 {
5734 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
5735 break;
5736 }
5737 if (pEvent->n.u8Vector == X86_XCPT_OF)
5738 {
5739 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
5740 break;
5741 }
5742 /** @todo How do we distinguish ICEBP \#DB from the regular one? */
5743 RT_FALL_THRU();
5744 case SVM_EVENT_NMI:
5745 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5746 break;
5747
5748 case SVM_EVENT_EXTERNAL_IRQ:
5749 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
5750 break;
5751
5752 case SVM_EVENT_SOFTWARE_INT:
5753 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5754 break;
5755
5756 default:
5757 fIemXcptFlags = 0;
5758 AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
5759 break;
5760 }
5761 return fIemXcptFlags;
5762}
5763
5764#else
5765/**
5766 * Determines if an exception is a contributory exception.
5767 *
5768 * Contributory exceptions are ones which can cause double-faults unless the
5769 * original exception was a benign exception. Page-fault is intentionally not
5770 * included here as it's a conditional contributory exception.
5771 *
5772 * @returns @c true if the exception is contributory, @c false otherwise.
5773 * @param uVector The exception vector.
5774 */
5775DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)
5776{
5777 switch (uVector)
5778 {
5779 case X86_XCPT_GP:
5780 case X86_XCPT_SS:
5781 case X86_XCPT_NP:
5782 case X86_XCPT_TS:
5783 case X86_XCPT_DE:
5784 return true;
5785 default:
5786 break;
5787 }
5788 return false;
5789}
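/*
 * Illustrative sketch (not built): how the benign/contributory/page-fault classes combine into
 * the double-fault decision taken in hmR0SvmCheckExitDueToEventDelivery() below. Hypothetical
 * helper; uFirstVector is the event that was being delivered, uSecondVector the exception
 * raised while delivering it.
 */
#if 0
static bool svmSketchIsDoubleFault(uint32_t uFirstVector, uint32_t uSecondVector)
{
    /* A contributory exception while delivering a contributory exception or a #PF -> #DF. */
    if (   hmR0SvmIsContributoryXcpt(uSecondVector)
        && (hmR0SvmIsContributoryXcpt(uFirstVector) || uFirstVector == X86_XCPT_PF))
        return true;
    /* A #PF while delivering a #PF -> #DF (the vectoring double #PF case). */
    if (uSecondVector == X86_XCPT_PF && uFirstVector == X86_XCPT_PF)
        return true;
    return false;   /* Everything else is handled serially or simply re-injected. */
}
#endif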
5790#endif /* HMSVM_USE_IEM_EVENT_REFLECTION */
5791
5792
5793/**
5794 * Handle a condition that occurred while delivering an event through the guest
5795 * IDT.
5796 *
5797 * @returns VBox status code (informational error codes included).
5798 * @retval VINF_SUCCESS if we should continue handling the \#VMEXIT.
5799 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
5800 *          continue execution of the guest which will deliver the \#DF.
5801 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5802 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5803 *
5804 * @param pVCpu The cross context virtual CPU structure.
5805 * @param pCtx Pointer to the guest-CPU context.
5806 * @param pSvmTransient Pointer to the SVM transient structure.
5807 *
5808 * @remarks No-long-jump zone!!!
5809 */
5810static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
5811{
5812 int rc = VINF_SUCCESS;
5813 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
5814
5815 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
5816 pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
5817 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));
5818
5819 /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". The EXITINTINFO (if valid) contains the prior exception (IDT vector)
5820     * that was being delivered to the guest when the intercepted #VMEXIT occurred (exit vector). */
5821 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
5822 {
5823#ifdef HMSVM_USE_IEM_EVENT_REFLECTION
5824 IEMXCPTRAISE enmRaise;
5825 IEMXCPTRAISEINFO fRaiseInfo;
5826        bool const fExitIsHwXcpt  = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_31 - SVM_EXIT_EXCEPTION_0;
5827 uint8_t const uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
5828 if (fExitIsHwXcpt)
5829 {
5830 uint8_t const uExitVector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
5831 uint32_t const fIdtVectorFlags = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
5832 uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5833 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5834 }
5835 else
5836 {
5837 /*
5838 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF) then we
5839 * end up here.
5840 *
5841 * If the event was:
5842 * - a software interrupt, we can re-execute the instruction which will regenerate
5843 * the event.
5844 * - an NMI, we need to clear NMI blocking and re-inject the NMI.
5845 * - a hardware exception or external interrupt, we re-inject it.
5846 */
5847 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5848 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
5849 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5850 else
5851 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5852 }
5853
5854 switch (enmRaise)
5855 {
5856 case IEMXCPTRAISE_CURRENT_XCPT:
5857 case IEMXCPTRAISE_PREV_EVENT:
5858 {
5859 /* For software interrupts, we shall re-execute the instruction. */
5860 if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
5861 {
5862 RTGCUINTPTR GCPtrFaultAddress = 0;
5863
5864 /* If we are re-injecting an NMI, clear NMI blocking. */
5865 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
5866 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5867
5868 /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
5869 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5870 pSvmTransient->fVectoringPF = true;
5871 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
5872 && uIdtVector == X86_XCPT_PF)
5873 {
5874 /*
5875 * If the previous exception was a #PF, we need to recover the CR2 value.
5876 * This can't happen with shadow paging.
5877 */
5878 GCPtrFaultAddress = pCtx->cr2;
5879 }
5880
5881 /*
5882                     * Without nested paging, when uExitVector is #PF, the CR2 value will be updated from the VMCB's
5883                     * exit info fields if it's a guest #PF; see hmR0SvmExitXcptPF().
5884 */
5885 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
5886 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5887 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);
5888
5889 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
5890 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
5891 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
5892 }
5893 break;
5894 }
5895
5896 case IEMXCPTRAISE_REEXEC_INSTR:
5897 {
5898 Assert(rc == VINF_SUCCESS);
5899 break;
5900 }
5901
5902 case IEMXCPTRAISE_DOUBLE_FAULT:
5903 {
5904 /*
5905                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5906 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5907 */
5908 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5909 {
5910 pSvmTransient->fVectoringDoublePF = true;
5911 Assert(rc == VINF_SUCCESS);
5912 }
5913 else
5914 {
5915 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5916 hmR0SvmSetPendingXcptDF(pVCpu);
5917 rc = VINF_HM_DOUBLE_FAULT;
5918 }
5919 break;
5920 }
5921
5922 case IEMXCPTRAISE_TRIPLE_FAULT:
5923 {
5924 rc = VINF_EM_RESET;
5925 break;
5926 }
5927
5928 case IEMXCPTRAISE_CPU_HANG:
5929 {
5930 rc = VERR_EM_GUEST_CPU_HANG;
5931 break;
5932 }
5933
5934 default:
5935 {
5936                AssertMsgFailed(("hmR0SvmCheckExitDueToEventDelivery: Unexpected enmRaise value: %#x\n", enmRaise));
5937 rc = VERR_SVM_IPE_2;
5938 break;
5939 }
5940 }
5941#else
5942 uint8_t uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
5943
5944 typedef enum
5945 {
5946 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5947 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5948 SVMREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5949 SVMREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */
5950 SVMREFLECTXCPT_NONE /* Nothing to reflect. */
5951 } SVMREFLECTXCPT;
5952
5953 SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
5954 bool fReflectingNmi = false;
5955 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
5956 {
5957        if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_31 - SVM_EXIT_EXCEPTION_0)
5958 {
5959 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
5960
5961#ifdef VBOX_STRICT
5962 if ( hmR0SvmIsContributoryXcpt(uIdtVector)
5963 && uExitVector == X86_XCPT_PF)
5964 {
5965 Log4(("IDT: Contributory #PF idCpu=%u uCR2=%#RX64\n", pVCpu->idCpu, pCtx->cr2));
5966 }
5967#endif
5968
5969 if ( uIdtVector == X86_XCPT_BP
5970 || uIdtVector == X86_XCPT_OF)
5971 {
5972 /* Ignore INT3/INTO, just re-execute. See @bugref{8357}. */
5973 }
5974 else if ( uExitVector == X86_XCPT_PF
5975 && uIdtVector == X86_XCPT_PF)
5976 {
5977 pSvmTransient->fVectoringDoublePF = true;
5978 Log4(("IDT: Vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));
5979 }
5980 else if ( uExitVector == X86_XCPT_AC
5981 && uIdtVector == X86_XCPT_AC)
5982 {
5983 enmReflect = SVMREFLECTXCPT_HANG;
5984 Log4(("IDT: Nested #AC - Bad guest\n"));
5985 }
5986 else if ( (pVmcb->ctrl.u32InterceptXcpt & HMSVM_CONTRIBUTORY_XCPT_MASK)
5987 && hmR0SvmIsContributoryXcpt(uExitVector)
5988 && ( hmR0SvmIsContributoryXcpt(uIdtVector)
5989 || uIdtVector == X86_XCPT_PF))
5990 {
5991 enmReflect = SVMREFLECTXCPT_DF;
5992 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
5993 uIdtVector, uExitVector));
5994 }
5995 else if (uIdtVector == X86_XCPT_DF)
5996 {
5997 enmReflect = SVMREFLECTXCPT_TF;
5998 Log4(("IDT: Pending vectoring triple-fault %#RX64 uIdtVector=%#x uExitVector=%#x\n",
5999 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
6000 }
6001 else
6002 enmReflect = SVMREFLECTXCPT_XCPT;
6003 }
6004 else
6005 {
6006 /*
6007             * If event delivery caused a #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
6008 * exception to the guest after handling the #VMEXIT.
6009 */
6010 enmReflect = SVMREFLECTXCPT_XCPT;
6011 }
6012 }
6013 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
6014 || pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
6015 {
6016 enmReflect = SVMREFLECTXCPT_XCPT;
6017 fReflectingNmi = RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI);
6018
6019 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_31)
6020 {
6021 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
6022 if (uExitVector == X86_XCPT_PF)
6023 {
6024 pSvmTransient->fVectoringPF = true;
6025 Log4(("IDT: Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));
6026 }
6027 }
6028 }
6029 /* else: Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */
6030
6031 switch (enmReflect)
6032 {
6033 case SVMREFLECTXCPT_XCPT:
6034 {
6035 /* If we are re-injecting the NMI, clear NMI blocking. */
6036 if (fReflectingNmi)
6037 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6038
6039 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
6040 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6041 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, 0 /* GCPtrFaultAddress */);
6042
6043 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
6044 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
6045 !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
6046 break;
6047 }
6048
6049 case SVMREFLECTXCPT_DF:
6050 {
6051 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6052 hmR0SvmSetPendingXcptDF(pVCpu);
6053 rc = VINF_HM_DOUBLE_FAULT;
6054 break;
6055 }
6056
6057 case SVMREFLECTXCPT_TF:
6058 {
6059 rc = VINF_EM_RESET;
6060 break;
6061 }
6062
6063 case SVMREFLECTXCPT_HANG:
6064 {
6065 rc = VERR_EM_GUEST_CPU_HANG;
6066 break;
6067 }
6068
6069 default:
6070 Assert(rc == VINF_SUCCESS);
6071 break;
6072 }
6073#endif /* HMSVM_USE_IEM_EVENT_REFLECTION */
6074 }
6075 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
6076 NOREF(pCtx);
6077 return rc;
6078}
6079
6080
6081/**
6082 * Advances the guest RIP making use of the CPU's NRIP_SAVE feature if
6083 * supported, otherwise advances the RIP by the number of bytes specified in
6084 * @a cb.
6085 *
6086 * @param pVCpu The cross context virtual CPU structure.
6087 * @param pCtx Pointer to the guest-CPU context.
6088 * @param cb RIP increment value in bytes.
6089 *
6090 * @remarks Use this function only from \#VMEXIT's where the NRIP value is valid
6091 * when NRIP_SAVE is supported by the CPU, otherwise use
6092 * hmR0SvmAdvanceRipDumb!
6093 */
6094DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
6095{
6096 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6097 if (fSupportsNextRipSave)
6098 {
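/* NextRIP holds the address of the instruction following the intercepted one; use it rather than trusting the
   caller-supplied length (the release assertion below cross-checks the two). */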
6099 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6100 Assert(pVmcb->ctrl.u64NextRIP);
6101 AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb); /* temporary, remove later */
6102 pCtx->rip = pVmcb->ctrl.u64NextRIP;
6103 }
6104 else
6105 pCtx->rip += cb;
6106
6107 HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx);
6108}
6109
6110
6111#ifdef VBOX_WITH_NESTED_HWVIRT
6112/**
6113 * Gets the length of the current instruction if the CPU supports the NRIP_SAVE
6114 * feature. Otherwise, returns the value in @a cbLikely.
6115 *
6116 * @param pVCpu The cross context virtual CPU structure.
6117 * @param pCtx Pointer to the guest-CPU context.
6118 * @param cbLikely The likely instruction length.
6119 */
6120DECLINLINE(uint8_t) hmR0SvmGetInstrLengthHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbLikely)
6121{
6122 Assert(cbLikely <= 15); /* See Intel spec. 2.3.11 "AVX Instruction Length" */
6123 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6124 if (fSupportsNextRipSave)
6125 {
6126 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6127 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6128 Assert(cbInstr == cbLikely);
6129 return cbInstr;
6130 }
6131 return cbLikely;
6132}
6133#endif
6134
6135
6136/**
6137 * Advances the guest RIP by the number of bytes specified in @a cb. This does
6138 * not make use of any hardware features to determine the instruction length.
6139 *
6140 * @param pVCpu The cross context virtual CPU structure.
6141 * @param pCtx Pointer to the guest-CPU context.
6142 * @param cb RIP increment value in bytes.
6143 */
6144DECLINLINE(void) hmR0SvmAdvanceRipDumb(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
6145{
6146 pCtx->rip += cb;
6147 HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx);
6148}
6149#undef HMSVM_UPDATE_INTR_SHADOW
6150
6151
6152/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6153/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6154/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6155
6156/** @name \#VMEXIT handlers.
6157 * @{
6158 */
6159
6160/**
6161 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
6162 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
6163 */
6164HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6165{
6166 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6167
6168 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
6169 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
6170 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
6171 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6172
6173 /*
6174 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to signal -before- the timer
6175 * fires whether the current interrupt is our own timer or some other host interrupt. We also cannot examine what
6176 * interrupt it is until the host actually takes the interrupt.
6177 *
6178 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an
6179 * AMD Phenom 9850 Quad-Core on Windows 64-bit host).
6180 */
6181 return VINF_EM_RAW_INTERRUPT;
6182}
6183
6184
6185/**
6186 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
6187 */
6188HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6189{
6190 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6191
6192 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6193 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
6194 int rc = VINF_SUCCESS;
6195 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6196 return rc;
6197}
6198
6199
6200/**
6201 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
6202 */
6203HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6204{
6205 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6206
6207 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6208 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
6209 int rc = VINF_SUCCESS;
6210 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6211 return rc;
6212}
6213
6214
6215/**
6216 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
6217 */
6218HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6219{
6220 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6221 PVM pVM = pVCpu->CTX_SUFF(pVM);
6222 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
6223 if (RT_LIKELY(rc == VINF_SUCCESS))
6224 {
6225 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6226 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6227 }
6228 else
6229 {
6230 AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
6231 rc = VERR_EM_INTERPRETER;
6232 }
6233 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
6234 return rc;
6235}
6236
6237
6238/**
6239 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
6240 */
6241HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6242{
6243 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6244 PVM pVM = pVCpu->CTX_SUFF(pVM);
6245 int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
6246 if (RT_LIKELY(rc == VINF_SUCCESS))
6247 {
6248 pSvmTransient->fUpdateTscOffsetting = true;
6249 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6250 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6251 }
6252 else
6253 {
6254 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
6255 rc = VERR_EM_INTERPRETER;
6256 }
6257 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
6258 return rc;
6259}
6260
6261
6262/**
6263 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
6264 */
6265HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6266{
6267 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6268 int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
6269 if (RT_LIKELY(rc == VINF_SUCCESS))
6270 {
6271 pSvmTransient->fUpdateTscOffsetting = true;
6272 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3);
6273 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6274 }
6275 else
6276 {
6277 AssertMsgFailed(("hmR0SvmExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
6278 rc = VERR_EM_INTERPRETER;
6279 }
6280 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
6281 return rc;
6282}
6283
6284
6285/**
6286 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
6287 */
6288HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6289{
6290 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6291 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
6292 if (RT_LIKELY(rc == VINF_SUCCESS))
6293 {
6294 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6295 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6296 }
6297 else
6298 {
6299 AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
6300 rc = VERR_EM_INTERPRETER;
6301 }
6302 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
6303 return rc;
6304}
6305
6306
6307/**
6308 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
6309 */
6310HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6311{
6312 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6313 PVM pVM = pVCpu->CTX_SUFF(pVM);
6314 Assert(!pVM->hm.s.fNestedPaging);
6315 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
6316
6317 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
6318 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6319 if ( fSupportsDecodeAssists
6320 && fSupportsNextRipSave)
6321 {
6322 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6323 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6324 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
6325 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
6326 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6327 return VBOXSTRICTRC_VAL(rcStrict);
6328 }
6329
6330 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx); /* Updates RIP if successful. */
6331 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
6332 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6333 return rc;
6334}
6335
6336
6337/**
6338 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
6339 */
6340HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6341{
6342 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6343
6344 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 1);
6345 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT;
6346 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6347 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
6348 if (rc != VINF_SUCCESS)
6349 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
6350 return rc;
6351}
6352
6353
6354/**
6355 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
6356 */
6357HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6358{
6359 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6360 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
6361 if (RT_LIKELY(rc == VINF_SUCCESS))
6362 {
6363 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3);
6364 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6365 }
6366 else
6367 {
6368 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
6369 rc = VERR_EM_INTERPRETER;
6370 }
6371 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
6372 return rc;
6373}
6374
6375
6376/**
6377 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
6378 */
6379HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6380{
6381 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6382 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
6383 int rc = VBOXSTRICTRC_VAL(rc2);
6384 if ( rc == VINF_EM_HALT
6385 || rc == VINF_SUCCESS)
6386 {
6387 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3);
6388
6389 if ( rc == VINF_EM_HALT
6390 && EMMonitorWaitShouldContinue(pVCpu, pCtx))
6391 {
6392 rc = VINF_SUCCESS;
6393 }
6394 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6395 }
6396 else
6397 {
6398 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
6399 rc = VERR_EM_INTERPRETER;
6400 }
6401 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
6402 ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
6403 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
6404 return rc;
6405}
6406
6407
6408/**
6409 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
6410 * \#VMEXIT.
6411 */
6412HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6413{
6414 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6415 return VINF_EM_RESET;
6416}
6417
6418
6419/**
6420 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
6421 */
6422HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6423{
6424 RT_NOREF(pCtx);
6425 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64\n", pSvmTransient->u64ExitCode));
6426 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
6427 return VERR_SVM_UNEXPECTED_EXIT;
6428}
6429
6430
6431/**
6432 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
6433 */
6434HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6435{
6436 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6437
6438 Log4(("hmR0SvmExitReadCRx: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
6439 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);
6440
6441 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
6442 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6443 if ( fSupportsDecodeAssists
6444 && fSupportsNextRipSave)
6445 {
6446 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6447 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
6448 if (fMovCRx)
6449 {
6450 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6451 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
6452 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
6453 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6454 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6455 return VBOXSTRICTRC_VAL(rcStrict);
6456 }
6457 /* else: SMSW instruction, fall back below to IEM for this. */
6458 }
6459
6460 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
6461 int rc = VBOXSTRICTRC_VAL(rc2);
6462 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3,
6463 ("hmR0SvmExitReadCRx: EMInterpretInstruction failed rc=%Rrc\n", rc));
6464 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
6465 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6466 return rc;
6467}
6468
6469
6470/**
6471 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
6472 */
6473HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6474{
6475 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6476
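/* The SVM_EXIT_WRITE_CR0..CR15 exit codes are contiguous, so the offset from SVM_EXIT_WRITE_CR0 is the CR number. */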
6477 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0;
6478 Assert(iCrReg <= 15);
6479
6480 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
6481 bool fDecodedInstr = false;
6482 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
6483 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6484 if ( fSupportsDecodeAssists
6485 && fSupportsNextRipSave)
6486 {
6487 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6488 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
6489 if (fMovCRx)
6490 {
6491 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6492 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
6493 Log4(("hmR0SvmExitWriteCRx: Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
6494 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6495 fDecodedInstr = true;
6496 }
6497 /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
6498 }
6499
6500 if (!fDecodedInstr)
6501 {
6502 Log4(("hmR0SvmExitWriteCRx: iCrReg=%#x\n", iCrReg));
6503 rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
6504 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
6505 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
6506 rcStrict = VERR_EM_INTERPRETER;
6507 }
6508
6509 if (rcStrict == VINF_SUCCESS)
6510 {
6511 switch (iCrReg)
6512 {
6513 case 0: /* CR0. */
6514 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
6515 break;
6516
6517 case 3: /* CR3. */
6518 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
6519 break;
6520
6521 case 4: /* CR4. */
6522 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
6523 break;
6524
6525 case 8: /* CR8 (TPR). */
6526 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
6527 break;
6528
6529 default:
6530 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
6531 pSvmTransient->u64ExitCode, iCrReg));
6532 break;
6533 }
6534 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6535 }
6536 else
6537 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_CHANGE_MODE || rcStrict == VINF_PGM_SYNC_CR3);
6538 return VBOXSTRICTRC_TODO(rcStrict);
6539}
6540
6541
6542/**
6543 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
6544 * \#VMEXIT.
6545 */
6546HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6547{
6548 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6549 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6550 PVM pVM = pVCpu->CTX_SUFF(pVM);
6551
6552 int rc;
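/* For MSR intercepts, EXITINFO1 gives the access type: SVM_EXIT1_MSR_WRITE for WRMSR, SVM_EXIT1_MSR_READ for RDMSR. */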
6553 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
6554 {
6555 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
6556 Log4(("MSR Write: idMsr=%#RX32\n", pCtx->ecx));
6557
6558 /* Handle TPR patching; intercepted LSTAR write. */
6559 if ( pVM->hm.s.fTPRPatchingActive
6560 && pCtx->ecx == MSR_K8_LSTAR)
6561 {
6562 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
6563 {
6564 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
6565 int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff);
6566 AssertRC(rc2);
6567 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
6568 }
6569 rc = VINF_SUCCESS;
6570 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
6571 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6572 return rc;
6573 }
6574
6575 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6576 if (fSupportsNextRipSave)
6577 {
6578 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
6579 if (RT_LIKELY(rc == VINF_SUCCESS))
6580 {
6581 pCtx->rip = pVmcb->ctrl.u64NextRIP;
6582 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6583 }
6584 else
6585 AssertMsg( rc == VERR_EM_INTERPRETER
6586 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
6587 }
6588 else
6589 {
6590 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
6591 if (RT_LIKELY(rc == VINF_SUCCESS))
6592 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); /* RIP updated by EMInterpretInstruction(). */
6593 else
6594 AssertMsg( rc == VERR_EM_INTERPRETER
6595 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
6596 }
6597
6598 if (rc == VINF_SUCCESS)
6599 {
6600 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
6601 if ( pCtx->ecx >= MSR_IA32_X2APIC_START
6602 && pCtx->ecx <= MSR_IA32_X2APIC_END)
6603 {
6604 /*
6605 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). When full APIC register
6606 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before
6607 * EMInterpretWrmsr() changes it.
6608 */
6609 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
6610 }
6611 else
6612 {
6613 switch (pCtx->ecx)
6614 {
6615 case MSR_K6_EFER: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR); break;
6616 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
6617 case MSR_K8_FS_BASE:
6618 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
6619 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
6620 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
6621 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
6622 }
6623 }
6624 }
6625 }
6626 else
6627 {
6628 /* MSR Read access. */
6629 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
6630 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
6631 Log4(("MSR Read: idMsr=%#RX32\n", pCtx->ecx));
6632
6633 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6634 if (fSupportsNextRipSave)
6635 {
6636 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
6637 if (RT_LIKELY(rc == VINF_SUCCESS))
6638 {
6639 pCtx->rip = pVmcb->ctrl.u64NextRIP;
6640 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6641 }
6642 else
6643 AssertMsg( rc == VERR_EM_INTERPRETER
6644 || rc == VINF_CPUM_R3_MSR_READ, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
6645 }
6646 else
6647 {
6648 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
6649 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6650 {
6651 AssertMsg( rc == VERR_EM_INTERPRETER
6652 || rc == VINF_CPUM_R3_MSR_READ, ("hmR0SvmExitMsr: RdMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
6653 }
6654 /* RIP updated by EMInterpretInstruction(). */
6655 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6656 }
6657 }
6658
6659 /* RIP has been updated by EMInterpret[Rd|Wr]msr() or EMInterpretInstruction(). */
6660 return rc;
6661}
6662
6663
6664/**
6665 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
6666 */
6667HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6668{
6669 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6670 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
6671
6672 /** @todo Stepping with nested-guest. */
6673 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
6674 {
6675 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
6676 if (pSvmTransient->fWasGuestDebugStateActive)
6677 {
6678 AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
6679 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
6680 return VERR_SVM_UNEXPECTED_EXIT;
6681 }
6682
6683 /*
6684 * Lazy DR0-3 loading.
6685 */
6686 if (!pSvmTransient->fWasHyperDebugStateActive)
6687 {
6688 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
6689 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
6690
6691 /* Don't intercept DRx read and writes. */
6692 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
6693 pVmcb->ctrl.u16InterceptRdDRx = 0;
6694 pVmcb->ctrl.u16InterceptWrDRx = 0;
6695 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
6696
6697 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
6698 VMMRZCallRing3Disable(pVCpu);
6699 HM_DISABLE_PREEMPT();
6700
6701 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
6702 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
6703 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
6704
6705 HM_RESTORE_PREEMPT();
6706 VMMRZCallRing3Enable(pVCpu);
6707
6708 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
6709 return VINF_SUCCESS;
6710 }
6711 }
6712
6713 /*
6714 * Interpret the read/writing of DRx.
6715 */
6716 /** @todo Decode assist. */
6717 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
6718 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
6719 if (RT_LIKELY(rc == VINF_SUCCESS))
6720 {
6721 /* Not necessary for read accesses but it doesn't hurt for now; will be fixed with decode assist. */
6722 /** @todo CPUM should set this flag! */
6723 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
6724 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6725 }
6726 else
6727 Assert(rc == VERR_EM_INTERPRETER);
6728 return VBOXSTRICTRC_TODO(rc);
6729}
6730
6731
6732/**
6733 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
6734 */
6735HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6736{
6737 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6738 /* For now it's the same since we interpret the instruction anyway. Will change when use of Decode Assist is implemented. */
6739 int rc = hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
6740 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
6741 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
6742 return rc;
6743}
6744
6745
6746/**
6747 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
6748 */
6749HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6750{
6751 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6752
6753 /** @todo decode assists... */
6754 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6755 if (rcStrict == VINF_IEM_RAISED_XCPT)
6756 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
6757
6758 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
6759 Log4(("hmR0SvmExitXsetbv: New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n",
6760 pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, pCtx->cr4, VBOXSTRICTRC_VAL(rcStrict)));
6761
6762 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6763 return VBOXSTRICTRC_TODO(rcStrict);
6764}
6765
6766
6767/**
6768 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
6769 */
6770HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6771{
6772 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6773
6774 /* I/O operation lookup arrays. */
6775 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
6776 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
6777 the result (in AL/AX/EAX). */
6778 Log4(("hmR0SvmExitIOInstr: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
6779
6780 PVM pVM = pVCpu->CTX_SUFF(pVM);
6781 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
6782
6783 /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
6784 SVMIOIOEXITINFO IoExitInfo;
6785 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
6786 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
6787 uint32_t cbValue = s_aIOSize[uIOWidth];
6788 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
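/* The operand-size field in EXITINFO1 (bits 4..6, see the figure referenced above) consists of individual
   SZ8/SZ16/SZ32 width flags, so only indices 1, 2 and 4 are valid; the remaining lookup table entries are zero
   and a zero size is rejected just below. */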
6789
6790 if (RT_UNLIKELY(!cbValue))
6791 {
6792 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
6793 return VERR_EM_INTERPRETER;
6794 }
6795
6796 VBOXSTRICTRC rcStrict;
6797 bool fUpdateRipAlready = false;
6798 if (IoExitInfo.n.u1STR)
6799 {
6800#ifdef VBOX_WITH_2ND_IEM_STEP
6801 /* INS/OUTS - I/O String instruction. */
6802 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
6803 * in EXITINFO1? Investigate once this thing is up and running. */
6804 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
6805 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
6806 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
6807 static IEMMODE const s_aenmAddrMode[8] =
6808 {
6809 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
6810 };
6811 IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
6812 if (enmAddrMode != (IEMMODE)-1)
6813 {
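/* For IOIO intercepts EXITINFO2 holds the RIP of the instruction following the IN/OUT, so the difference from
   the current RIP is the instruction length (valid x86 instruction lengths are 1..15 bytes). */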
6814 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
6815 if (cbInstr <= 15 && cbInstr >= 1)
6816 {
6817 Assert(cbInstr >= 1U + IoExitInfo.n.u1REP);
6818 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
6819 {
6820 /* Don't know exactly how to detect whether u3SEG is valid, currently
6821 only enabling it for Bulldozer and later with NRIP. OS/2 broke on
6822 2384 Opterons when only checking NRIP. */
6823 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
6824 if ( fSupportsNextRipSave
6825 && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
6826 {
6827 AssertMsg(IoExitInfo.n.u3SEG == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1REP,
6828 ("u32Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3SEG, cbInstr, IoExitInfo.n.u1REP));
6829 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1REP, (uint8_t)cbInstr,
6830 IoExitInfo.n.u3SEG, true /*fIoChecked*/);
6831 }
6832 else if (cbInstr == 1U + IoExitInfo.n.u1REP)
6833 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1REP, (uint8_t)cbInstr,
6834 X86_SREG_DS, true /*fIoChecked*/);
6835 else
6836 rcStrict = IEMExecOne(pVCpu);
6837 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
6838 }
6839 else
6840 {
6841 AssertMsg(IoExitInfo.n.u3SEG == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3SEG));
6842 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1REP, (uint8_t)cbInstr,
6843 true /*fIoChecked*/);
6844 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
6845 }
6846 }
6847 else
6848 {
6849 AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
6850 rcStrict = IEMExecOne(pVCpu);
6851 }
6852 }
6853 else
6854 {
6855 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
6856 rcStrict = IEMExecOne(pVCpu);
6857 }
6858 fUpdateRipAlready = true;
6859
6860#else
6861 /* INS/OUTS - I/O String instruction. */
6862 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
6863
6864 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
6865 * in EXITINFO1? Investigate once this thing is up and running. */
6866
6867 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
6868 if (rcStrict == VINF_SUCCESS)
6869 {
6870 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
6871 {
6872 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
6873 (DISCPUMODE)pDis->uAddrMode, cbValue);
6874 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
6875 }
6876 else
6877 {
6878 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
6879 (DISCPUMODE)pDis->uAddrMode, cbValue);
6880 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
6881 }
6882 }
6883 else
6884 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
6885#endif
6886 }
6887 else
6888 {
6889 /* IN/OUT - I/O instruction. */
6890 Assert(!IoExitInfo.n.u1REP);
6891
6892 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
6893 {
6894 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
6895 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
6896 }
6897 else
6898 {
6899 uint32_t u32Val = 0;
6900 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
6901 if (IOM_SUCCESS(rcStrict))
6902 {
6903 /* Save result of I/O IN instr. in AL/AX/EAX. */
6904 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
6905 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
6906 }
6907 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
6908 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
6909
6910 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
6911 }
6912 }
6913
6914 if (IOM_SUCCESS(rcStrict))
6915 {
6916 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
6917 if (!fUpdateRipAlready)
6918 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
6919
6920 /*
6921 * If any I/O breakpoints are armed, we need to check if one triggered
6922 * and take appropriate action.
6923 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
6924 */
6925 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
6926 * execution engines about whether hyper BPs and such are pending. */
6927 uint32_t const uDr7 = pCtx->dr[7];
6928 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6929 && X86_DR7_ANY_RW_IO(uDr7)
6930 && (pCtx->cr4 & X86_CR4_DE))
6931 || DBGFBpIsHwIoArmed(pVM)))
6932 {
6933 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
6934 VMMRZCallRing3Disable(pVCpu);
6935 HM_DISABLE_PREEMPT();
6936
6937 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
6938 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
6939
6940 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
6941 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
6942 {
6943 /* Raise #DB. */
6944 pVmcb->guest.u64DR6 = pCtx->dr[6];
6945 pVmcb->guest.u64DR7 = pCtx->dr[7];
6946 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
6947 hmR0SvmSetPendingXcptDB(pVCpu);
6948 }
6949 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
6950 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
6951 else if ( rcStrict2 != VINF_SUCCESS
6952 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
6953 rcStrict = rcStrict2;
6954 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
6955
6956 HM_RESTORE_PREEMPT();
6957 VMMRZCallRing3Enable(pVCpu);
6958 }
6959
6960 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6961 }
6962
6963#ifdef VBOX_STRICT
6964 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
6965 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
6966 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
6967 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
6968 else
6969 {
6970 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
6971 * statuses, that the VMM device and some others may return. See
6972 * IOM_SUCCESS() for guidance. */
6973 AssertMsg( RT_FAILURE(rcStrict)
6974 || rcStrict == VINF_SUCCESS
6975 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
6976 || rcStrict == VINF_EM_DBG_BREAKPOINT
6977 || rcStrict == VINF_EM_RAW_GUEST_TRAP
6978 || rcStrict == VINF_EM_RAW_TO_R3
6979 || rcStrict == VINF_TRPM_XCPT_DISPATCHED
6980 || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6981 }
6982#endif
6983 return VBOXSTRICTRC_TODO(rcStrict);
6984}
6985
6986
6987/**
6988 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
6989 */
6990HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
6991{
6992 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
6993 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
6994
6995 PVM pVM = pVCpu->CTX_SUFF(pVM);
6996 Assert(pVM->hm.s.fNestedPaging);
6997
6998 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
6999
7000 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
7001 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7002 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
7003 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
7004
7005 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
7006
7007#ifdef VBOX_HM_WITH_GUEST_PATCHING
7008 /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
7009 if ( pVM->hm.s.fTprPatchingAllowed
7010 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
7011 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
7012 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
7013 && !CPUMIsGuestInLongModeEx(pCtx)
7014 && !CPUMGetGuestCPL(pVCpu)
7015 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7016 {
7017 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7018 GCPhysApicBase &= PAGE_BASE_GC_MASK;
7019
7020 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
7021 {
7022 /* Only attempt to patch the instruction once. */
7023 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7024 if (!pPatch)
7025 return VINF_EM_HM_PATCH_TPR_INSTR;
7026 }
7027 }
7028#endif
7029
7030 /*
7031 * Determine the nested paging mode.
7032 */
7033 PGMMODE enmNestedPagingMode;
7034#if HC_ARCH_BITS == 32
7035 if (CPUMIsGuestInLongModeEx(pCtx))
7036 enmNestedPagingMode = PGMMODE_AMD64_NX;
7037 else
7038#endif
7039 enmNestedPagingMode = PGMGetHostMode(pVM);
7040
7041 /*
7042 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
7043 */
7044 int rc;
7045 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
7046 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
7047 {
7048 /* If event delivery causes an MMIO #NPF, go back to instruction emulation as
7049 otherwise injecting the original pending event would most likely cause the same MMIO #NPF. */
7050 if (pVCpu->hm.s.Event.fPending)
7051 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7052
7053 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
7054 u32ErrCode);
7055 rc = VBOXSTRICTRC_VAL(rc2);
7056
7057 /*
7058 * If we succeed, resume guest execution.
7059 * If we fail in interpreting the instruction because we couldn't get the guest physical address
7060 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
7061 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
7062 * weird case. See @bugref{6043}.
7063 */
7064 if ( rc == VINF_SUCCESS
7065 || rc == VERR_PAGE_TABLE_NOT_PRESENT
7066 || rc == VERR_PAGE_NOT_PRESENT)
7067 {
7068 /* Successfully handled MMIO operation. */
7069 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
7070 rc = VINF_SUCCESS;
7071 }
7072 return rc;
7073 }
7074
7075 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
7076 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
7077 TRPMResetTrap(pVCpu);
7078
7079 Log4(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
7080
7081 /*
7082 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
7083 */
7084 if ( rc == VINF_SUCCESS
7085 || rc == VERR_PAGE_TABLE_NOT_PRESENT
7086 || rc == VERR_PAGE_NOT_PRESENT)
7087 {
7088 /* We've successfully synced our shadow page tables. */
7089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
7090 rc = VINF_SUCCESS;
7091 }
7092
7093 return rc;
7094}
7095
7096
7097/**
7098 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
7099 * \#VMEXIT.
7100 */
7101HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7102{
7103 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7104 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
7105
7106 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts, it is now ready. */
7107 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7108 hmR0SvmClearVirtIntrIntercept(pVmcb);
7109
7110 /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
7111 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7112 return VINF_SUCCESS;
7113}
7114
7115
7116/**
7117 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
7118 * \#VMEXIT.
7119 */
7120HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7121{
7122 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7123
7124 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7125
7126#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
7127 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
7128#endif
7129
7130 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
7131 if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
7132 {
7133 /*
7134 * AMD-V provides us with the exception which caused the TS; we collect
7135 * the information in the call to hmR0SvmCheckExitDueToEventDelivery.
7136 */
7137 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery.\n"));
7138 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
7139 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7140 }
7141
7142 /** @todo Emulate task switch someday, currently just going back to ring-3 for
7143 * emulation. */
7144 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
7145 return VERR_EM_INTERPRETER;
7146}
7147
7148
7149/**
7150 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
7151 */
7152HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7153{
7154 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
7156
7157 bool fRipUpdated;
7158 VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fRipUpdated);
7159 if (RT_SUCCESS(rcStrict))
7160 {
7161 /* Only update the RIP if we're continuing guest execution and not
7162 in the case of say VINF_GIM_R3_HYPERCALL. */
7163 if ( rcStrict == VINF_SUCCESS
7164 && !fRipUpdated)
7165 {
7166 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
7167 }
7168
7169 /* If the hypercall or TPR patching changes anything other than guest's general-purpose registers,
7170 we would need to reload the guest changed bits here before VM-entry. */
7171 return VBOXSTRICTRC_VAL(rcStrict);
7172 }
7173
7174 hmR0SvmSetPendingXcptUD(pVCpu);
7175 return VINF_SUCCESS;
7176}
7177
7178
7179/**
7180 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
7181 */
7182HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7183{
7184 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7185 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
7186 return VINF_EM_RAW_INTERRUPT;
7187}
7188
7189
7190/**
7191 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
7192 */
7193HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7194{
7195 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7196
7197 /* Clear NMI blocking. */
7198 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
7199
7200 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
7201 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7202 hmR0SvmClearIretIntercept(pVmcb);
7203
7204 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
7205 return VINF_SUCCESS;
7206}
7207
7208
7209/**
7210 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_EXCEPTION_14).
7211 * Conditional \#VMEXIT.
7212 */
7213HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7214{
7215 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7216 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
7217
7218 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7219
7220 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
7221 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7222 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
7223 RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
7224 PVM pVM = pVCpu->CTX_SUFF(pVM);
7225
7226#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
7227 if (pVM->hm.s.fNestedPaging)
7228 {
7229 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7230 if (!pSvmTransient->fVectoringDoublePF)
7231 {
7232 /* A genuine guest #PF, reflect it to the guest. */
7233 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
7234 Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
7235 uFaultAddress, u32ErrCode));
7236 }
7237 else
7238 {
7239 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7240 hmR0SvmSetPendingXcptDF(pVCpu);
7241 Log4(("Pending #DF due to vectoring #PF. NP\n"));
7242 }
7243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
7244 return VINF_SUCCESS;
7245 }
7246#endif
7247
7248 Assert(!pVM->hm.s.fNestedPaging);
7249
7250#ifdef VBOX_HM_WITH_GUEST_PATCHING
7251 /* Shortcut for APIC TPR reads and writes; only applicable to 32-bit guests. */
7252 if ( pVM->hm.s.fTprPatchingAllowed
7253 && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
7254 && !(u32ErrCode & X86_TRAP_PF_P) /* Not present. */
7255 && !CPUMIsGuestInLongModeEx(pCtx)
7256 && !CPUMGetGuestCPL(pVCpu)
7257 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7258 {
7259 RTGCPHYS GCPhysApicBase;
7260 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7261 GCPhysApicBase &= PAGE_BASE_GC_MASK;
7262
7263 /* Check if the page at the fault-address is the APIC base. */
7264 RTGCPHYS GCPhysPage;
7265 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
7266 if ( rc2 == VINF_SUCCESS
7267 && GCPhysPage == GCPhysApicBase)
7268 {
7269 /* Only attempt to patch the instruction once. */
7270 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7271 if (!pPatch)
7272 return VINF_EM_HM_PATCH_TPR_INSTR;
7273 }
7274 }
7275#endif
7276
7277 Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
7278 pCtx->rip, u32ErrCode, pCtx->cr3));
7279
7280 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
7281 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
7282 if (pSvmTransient->fVectoringPF)
7283 {
7284 Assert(pVCpu->hm.s.Event.fPending);
7285 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7286 }
7287
7288 TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
7289 int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
7290
7291 Log4(("#PF rc=%Rrc\n", rc));
7292
7293 if (rc == VINF_SUCCESS)
7294 {
7295 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
7296 TRPMResetTrap(pVCpu);
7297 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
7298 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7299 return rc;
7300 }
7301 else if (rc == VINF_EM_RAW_GUEST_TRAP)
7302 {
7303 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7304
7305 if (!pSvmTransient->fVectoringDoublePF)
7306 {
7307 /* It's a guest page fault and needs to be reflected to the guest. */
7308 u32ErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
7309 TRPMResetTrap(pVCpu);
7310 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
7311 }
7312 else
7313 {
7314 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7315 TRPMResetTrap(pVCpu);
7316 hmR0SvmSetPendingXcptDF(pVCpu);
7317 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
7318 }
7319
7320 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
7321 return VINF_SUCCESS;
7322 }
7323
7324 TRPMResetTrap(pVCpu);
7325 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
7326 return rc;
7327}
7328
7329
7330/**
7331 * \#VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
7332 * Conditional \#VMEXIT.
7333 */
7334HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7335{
7336 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7337
7338 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
7339 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7340 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
7341
7342 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7343 VMMRZCallRing3Disable(pVCpu);
7344 HM_DISABLE_PREEMPT();
7345
7346 int rc;
7347 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
7348 if (pSvmTransient->fWasGuestFPUStateActive)
7349 {
7350 rc = VINF_EM_RAW_GUEST_TRAP;
7351 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
7352 }
7353 else
7354 {
7355#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
7356 Assert(!pSvmTransient->fWasGuestFPUStateActive);
7357#endif
7358 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu); /* (No need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
7359 Assert( rc == VINF_EM_RAW_GUEST_TRAP
7360 || ((rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED) && CPUMIsGuestFPUStateActive(pVCpu)));
7361 }
7362
7363 HM_RESTORE_PREEMPT();
7364 VMMRZCallRing3Enable(pVCpu);
7365
7366 if (rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED)
7367 {
7368 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
7369 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
7370 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
7371 pVCpu->hm.s.fPreloadGuestFpu = true;
7372 }
7373 else
7374 {
7375 /* Forward #NM to the guest. */
7376 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
7377 hmR0SvmSetPendingXcptNM(pVCpu);
7378 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
7379 }
7380 return VINF_SUCCESS;
7381}
7382
7383
7384/**
7385 * \#VMEXIT handler for undefined opcode (SVM_EXIT_EXCEPTION_6).
7386 * Conditional \#VMEXIT.
7387 */
7388HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7389{
7390 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7391
7392 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
7393 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7394 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
7395
7396 int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
7397 if (pVCpu->hm.s.fGIMTrapXcptUD)
7398 {
7399 uint8_t cbInstr = 0;
7400 VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis */, &cbInstr);
7401 if (rcStrict == VINF_SUCCESS)
7402 {
7403 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
7404 hmR0SvmAdvanceRipDumb(pVCpu, pCtx, cbInstr);
7405 rc = VINF_SUCCESS;
7406 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7407 }
7408 else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
7409 rc = VINF_SUCCESS;
7410 else if (rcStrict == VINF_GIM_R3_HYPERCALL)
7411 rc = VINF_GIM_R3_HYPERCALL;
7412 else
7413 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7414 }
7415
7416 /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
7417 if (RT_FAILURE(rc))
7418 {
7419 hmR0SvmSetPendingXcptUD(pVCpu);
7420 rc = VINF_SUCCESS;
7421 }
7422
7423 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7424 return rc;
7425}
7426
7427
7428/**
7429 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_EXCEPTION_16).
7430 * Conditional \#VMEXIT.
7431 */
7432HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7433{
7434 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7435
7436 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
7437 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7438 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
7439
7440 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
7441
7442 if (!(pCtx->cr0 & X86_CR0_NE))
7443 {
7444 PVM pVM = pVCpu->CTX_SUFF(pVM);
7445 PDISSTATE pDis = &pVCpu->hm.s.DisState;
7446 unsigned cbOp;
7447 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
7448 if (RT_SUCCESS(rc))
7449 {
7450 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7451 /** @todo FERR intercept when in nested-guest mode? */
7452 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7453 if (RT_SUCCESS(rc))
7454 pCtx->rip += cbOp;
7455 }
7456 else
7457 Log4(("hmR0SvmExitXcptMF: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
7458 return rc;
7459 }
7460
7461 hmR0SvmSetPendingXcptMF(pVCpu);
7462 return VINF_SUCCESS;
7463}
7464
7465
7466/**
7467 * \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1). Conditional
7468 * \#VMEXIT.
7469 */
7470HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7471{
7472 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7473
7474 /* If this #DB is the result of delivering an event, go back to the interpreter. */
7475 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7476 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
7477 {
7478 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
7479 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7480 }
7481
7482 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
7483
7484 /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
7485 DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
7486 PVM pVM = pVCpu->CTX_SUFF(pVM);
7487 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
7488 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
7489 if (rc == VINF_EM_RAW_GUEST_TRAP)
7490 {
7491 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
7492 if (CPUMIsHyperDebugStateActive(pVCpu))
7493 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
7494
7495 /* Reflect the exception back to the guest. */
7496 hmR0SvmSetPendingXcptDB(pVCpu);
7497 rc = VINF_SUCCESS;
7498 }
7499
7500 /*
7501 * Update DR6.
7502 */
7503 if (CPUMIsHyperDebugStateActive(pVCpu))
7504 {
7505 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
7506 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
7507 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
7508 }
7509 else
7510 {
7511 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
7512 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
7513 }
7514
7515 return rc;
7516}
7517
7518
7519/**
7520 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_EXCEPTION_17).
7521 * Conditional \#VMEXIT.
7522 */
7523HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7524{
7525 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7526
7527 /** @todo if triple-fault is returned in nested-guest scenario convert to a
7528 * shutdown VMEXIT. */
7529 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7530
7531 SVMEVENT Event;
7532 Event.u = 0;
7533 Event.n.u1Valid = 1;
7534 Event.n.u3Type = SVM_EVENT_EXCEPTION;
7535 Event.n.u8Vector = X86_XCPT_AC;
7536 Event.n.u1ErrorCodeValid = 1;
7537 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
7538 return VINF_SUCCESS;
7539}
7540
7541
7542/**
7543 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_EXCEPTION_3).
7544 * Conditional \#VMEXIT.
7545 */
7546HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7547{
7548 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7549
7550 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7551
7552 int rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7553 if (rc == VINF_EM_RAW_GUEST_TRAP)
7554 {
7555 SVMEVENT Event;
7556 Event.u = 0;
7557 Event.n.u1Valid = 1;
7558 Event.n.u3Type = SVM_EVENT_EXCEPTION;
7559 Event.n.u8Vector = X86_XCPT_BP;
7560 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
7561 }
7562
7563 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
7564 return rc;
7565}
7566
7567
7568#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT)
7569/**
7570 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
7571 */
7572HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7573{
7574 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7575
7576 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7577
7578 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7579 uint8_t const uVector = pVmcb->ctrl.u64ExitCode - SVM_EXIT_EXCEPTION_0;
7580 uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
7581 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
7582 Assert(uVector <= X86_XCPT_LAST);
7583 Log4(("hmR0SvmExitXcptGeneric: uVector=%#x uErrCode=%u\n", uVector, uErrCode));
7584
7585 SVMEVENT Event;
7586 Event.u = 0;
7587 Event.n.u1Valid = 1;
7588 Event.n.u3Type = SVM_EVENT_EXCEPTION;
7589 Event.n.u8Vector = uVector;
7590 switch (uVector)
7591 {
7592 /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
7593 case X86_XCPT_PF: AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
7594 case X86_XCPT_DF:
7595 case X86_XCPT_TS:
7596 case X86_XCPT_NP:
7597 case X86_XCPT_SS:
7598 case X86_XCPT_GP:
7599 case X86_XCPT_AC:
7600 {
7601 Event.n.u1ErrorCodeValid = 1;
7602 Event.n.u32ErrorCode = uErrCode;
7603 break;
7604 }
7605 }
7606
7607 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
7608 return VINF_SUCCESS;
7609}
7610#endif
7611
7612#ifdef VBOX_WITH_NESTED_HWVIRT
7613/**
7614 * \#VMEXIT handler for \#PF occurring while in nested-guest execution
7615 * (SVM_EXIT_EXCEPTION_14). Conditional \#VMEXIT.
7616 */
7617HMSVM_EXIT_DECL hmR0SvmExitXcptPFNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7618{
7619 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7620
7621 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7622
7623 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
7624 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7625 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
7626 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
7627
7628 Log4(("#PFNested: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode=%#RX32 CR3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
7629 pCtx->rip, u32ErrCode, pCtx->cr3));
7630
7631 /* If it's a vectoring #PF, emulate the original event injection as PGMTrap0eHandler() is incapable
7632 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
7633 if (pSvmTransient->fVectoringPF)
7634 {
7635 Assert(pVCpu->hm.s.Event.fPending);
7636 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7637 }
7638
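    /* This path is only taken without nested paging, i.e. when the nested-guest runs on shadow page tables managed by PGM. */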
7639 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
7640
7641 TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
7642 int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
7643
7644 Log4(("#PFNested: rc=%Rrc\n", rc));
7645
7646 if (rc == VINF_SUCCESS)
7647 {
7648 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
7649 TRPMResetTrap(pVCpu);
7650 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
7651 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7652 return rc;
7653 }
7654
7655 if (rc == VINF_EM_RAW_GUEST_TRAP)
7656 {
7657 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7658
7659 if (!pSvmTransient->fVectoringDoublePF)
7660 {
7661 /* It's a nested-guest page fault and needs to be reflected to the nested-guest. */
7662 u32ErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
7663 TRPMResetTrap(pVCpu);
7664 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
7665 }
7666 else
7667 {
7668 /* A nested-guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7669 TRPMResetTrap(pVCpu);
7670 hmR0SvmSetPendingXcptDF(pVCpu);
7671 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
7672 }
7673
7674 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
7675 return VINF_SUCCESS;
7676 }
7677
7678 TRPMResetTrap(pVCpu);
7679 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
7680 return rc;
7681}
7682
7683
7684/**
7685 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
7686 */
7687HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7688{
7689 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7690
7691#ifdef VBOX_STRICT
7692 PCSVMVMCB pVmcbTmp = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7693 Assert(pVmcbTmp);
7694 Assert(!pVmcbTmp->ctrl.IntCtrl.n.u1VGifEnable);
7695 RT_NOREF(pVmcbTmp);
7696#endif
7697
7698 /** @todo Stat. */
7699 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
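    /* CLGI (like the other SVM instructions handled below) is a fixed 3-byte opcode; the 3 is presumably used as the
       instruction length when the CPU doesn't supply a next-RIP value. */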
7700 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7701 VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
7702 return VBOXSTRICTRC_VAL(rcStrict);
7703}
7704
7705
7706/**
7707 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
7708 */
7709HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7710{
7711 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7712
7713#ifdef VBOX_STRICT
7714 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7715 Assert(pVmcb);
7716 Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
7717 RT_NOREF(pVmcb);
7718#endif
7719
7720 /** @todo Stat. */
7721 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
7722 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7723 VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
7724 return VBOXSTRICTRC_VAL(rcStrict);
7725}
7726
7727
7728/**
7729 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
7730 */
7731HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7732{
7733 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7734
7735#ifdef VBOX_STRICT
7736 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7737 Assert(pVmcb);
7738 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
7739 RT_NOREF(pVmcb);
7740#endif
7741
7742 /** @todo Stat. */
7743 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
7744 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7745 VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
7746 if (rcStrict == VINF_SUCCESS)
7747 {
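        /* VMLOAD loads FS, GS, TR, LDTR (plus KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs) from the VMCB. */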
7748 /* We skip flagging changes made to LSTAR, STAR, SFMASK and other MSRs as they are always re-loaded. */
7749 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7750 | HM_CHANGED_GUEST_TR
7751 | HM_CHANGED_GUEST_LDTR);
7752 }
7753 return VBOXSTRICTRC_VAL(rcStrict);
7754}
7755
7756
7757/**
7758 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
7759 */
7760HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7761{
7762 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7763
7764#ifdef VBOX_STRICT
7765 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
7766 Assert(pVmcb);
7767 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
7768 RT_NOREF(pVmcb);
7769#endif
7770
7771 /** @todo Stat. */
7772 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
7773 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7774 VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
7775 return VBOXSTRICTRC_VAL(rcStrict);
7776}
7777
7778
7779/**
7780 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
7781 */
7782HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7783{
7784 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7785 /** @todo Stat. */
7786 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpga); */
7787 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7788 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
7789 return VBOXSTRICTRC_VAL(rcStrict);
7790}
7791
7792
7793/**
7794 * \#VMEXIT handler for VMRUN (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
7795 */
7796HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7797{
7798 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7799 /** @todo Stat. */
7800 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmrun); */
7801#if 0
7802 VBOXSTRICTRC rcStrict;
7803 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
7804 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
7805 Log4(("IEMExecDecodedVmrun: returned %d\n", VBOXSTRICTRC_VAL(rcStrict)));
7806 if (rcStrict == VINF_SUCCESS)
7807 {
7808 rcStrict = VINF_SVM_VMRUN;
7809 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7810 }
7811 return VBOXSTRICTRC_VAL(rcStrict);
7812#endif
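    /* The IEM-assisted VMRUN path above is currently disabled; hand the instruction to the interpreter instead. */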
7813 return VERR_EM_INTERPRETER;
7814}
7815
7816
7817/**
7818 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1).
7819 * Unconditional \#VMEXIT.
7820 */
7821HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7822{
7823 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7824
7825 /* If this #DB is the result of delivering an event, go back to the interpreter. */
7826 /** @todo If a triple-fault is returned in the nested-guest scenario, convert it to a
7827 * shutdown VMEXIT. */
7828 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7829 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
7830 {
7831 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
7832 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7833 }
7834
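    /* Otherwise reflect the #DB straight back to the nested-guest. */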
7835 hmR0SvmSetPendingXcptDB(pVCpu);
7836 return VINF_SUCCESS;
7837}
7838
7839
7840/**
7841 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_EXCEPTION_3).
7842 * Conditional \#VMEXIT.
7843 */
7844HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
7845{
7846 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
7847
7848 /** @todo If a triple-fault is returned in the nested-guest scenario, convert it to a
7849 * shutdown VMEXIT. */
7850 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
7851
7852 SVMEVENT Event;
7853 Event.u = 0;
7854 Event.n.u1Valid = 1;
7855 Event.n.u3Type = SVM_EVENT_EXCEPTION;
7856 Event.n.u8Vector = X86_XCPT_BP;
7857 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
7858 return VINF_SUCCESS;
7859}
7860
7861#endif /* VBOX_WITH_NESTED_HWVIRT */
7862
7863
7864/** @} */
7865