source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp @ 94125

Last change on this file since 94125 was 93963, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Add HM ring-0 API for querying transient VMX/SVM info.

1/* $Id: HMSVMR0.cpp 93963 2022-02-28 08:39:08Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/asm-amd64-x86.h>
25#include <iprt/thread.h>
26
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/dbgf.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/gim.h>
34#include <VBox/vmm/apic.h>
35#include "HMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/err.h>
38#include "HMSVMR0.h"
39#include "dtrace/VBoxVMM.h"
40
41#ifdef DEBUG_ramshankar
42# define HMSVM_SYNC_FULL_GUEST_STATE
43# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
44# define HMSVM_ALWAYS_TRAP_PF
45# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#ifdef VBOX_WITH_STATISTICS
53# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
54 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
55 if ((u64ExitCode) == SVM_EXIT_NPF) \
56 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
57 else \
58 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
59 } while (0)
60
61# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
62 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebugExitAll); \
63 if ((u64ExitCode) == SVM_EXIT_NPF) \
64 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
65 else \
66 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
67 } while (0)
68
69# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
70 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll); \
71 if ((u64ExitCode) == SVM_EXIT_NPF) \
72 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitReasonNpf); \
73 else \
74 STAM_COUNTER_INC(&pVCpu->hm.s.aStatNestedExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
75 } while (0)
76#else
77# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
78# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
79# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
80#endif /* !VBOX_WITH_STATISTICS */
81
82/** If we decide to use a function table approach this can be useful to
83 * switch to a "static DECLCALLBACK(int)". */
84#define HMSVM_EXIT_DECL static VBOXSTRICTRC
85
86/**
87 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
88 * guest using hardware-assisted SVM.
89 *
90 * This excludes state like TSC AUX, GPRs (other than RSP, RAX) which are always
91 * swapped and restored across the world-switch, and also registers like
92 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
93 * \#VMEXIT.
94 */
95#define HMSVM_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
96 | CPUMCTX_EXTRN_RFLAGS \
97 | CPUMCTX_EXTRN_RAX \
98 | CPUMCTX_EXTRN_RSP \
99 | CPUMCTX_EXTRN_SREG_MASK \
100 | CPUMCTX_EXTRN_CR0 \
101 | CPUMCTX_EXTRN_CR2 \
102 | CPUMCTX_EXTRN_CR3 \
103 | CPUMCTX_EXTRN_TABLE_MASK \
104 | CPUMCTX_EXTRN_DR6 \
105 | CPUMCTX_EXTRN_DR7 \
106 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
107 | CPUMCTX_EXTRN_SYSCALL_MSRS \
108 | CPUMCTX_EXTRN_SYSENTER_MSRS \
109 | CPUMCTX_EXTRN_HWVIRT \
110 | CPUMCTX_EXTRN_INHIBIT_INT \
111 | CPUMCTX_EXTRN_HM_SVM_MASK)
112
113/**
114 * Subset of the guest-CPU state that is shared between the guest and host.
115 */
116#define HMSVM_CPUMCTX_SHARED_STATE CPUMCTX_EXTRN_DR_MASK
117
118/** Macro for importing guest state from the VMCB back into CPUMCTX. */
119#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \
120 do { \
121 if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \
122 hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \
123 } while (0)
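
/*
 * Illustrative usage sketch (simplified): this is roughly how the \#VMEXIT
 * handlers use the import macro, pulling in only the state bits they need
 * before touching the guest context:
 *
 * @code
 *     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     uint64_t const uGuestRip = pVCpu->cpum.GstCtx.rip;   // Safe to read now.
 * @endcode
 */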
124
125/** Assert that the required state bits are fetched. */
126#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
127 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
128 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
129
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()));
140
141/** Assert that we're not executing a nested-guest. */
142#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
143# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
144#else
145# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
146#endif
147
148/** Assert that we're executing a nested-guest. */
149#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
150# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
151#else
152# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
153#endif
154
155/** Macro for checking and, if necessary, returning from the calling function
156 * for \#VMEXIT intercepts that may be caused during delivery of another
157 * event in the guest. */
158#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
159# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
160 do \
161 { \
162 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
163 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
164 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
165 else if ( rc == VINF_EM_RESET \
166 && CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
167 { \
168 HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), HMSVM_CPUMCTX_EXTRN_ALL); \
169 return IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0); \
170 } \
171 else \
172 return rc; \
173 } while (0)
174#else
175# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
176 do \
177 { \
178 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
179 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
180 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
181 else \
182 return rc; \
183 } while (0)
184#endif
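
/*
 * Illustrative usage sketch (simplified; the handler name is hypothetical): a
 * typical \#VMEXIT handler runs this check first, so exits raised while
 * injecting a previous event are converted into double faults (or forwarded)
 * before any handler-specific work:
 *
 * @code
 *     HMSVM_EXIT_DECL hmR0SvmExitXcptExample(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
 *     {
 *         HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
 *         // ... handler-specific work goes here ...
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */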
185
186/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
187 * instruction that exited. */
188#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
189 do { \
190 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
191 (a_rc) = VINF_EM_DBG_STEPPED; \
192 } while (0)
193
194/** Validate segment descriptor granularity bit. */
195#ifdef VBOX_STRICT
196# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
197 AssertMsg( !(a_pCtx)->reg.Attr.n.u1Present \
198 || ( (a_pCtx)->reg.Attr.n.u1Granularity \
199 ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
200 : (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
201 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
202 (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
203#else
204# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) do { } while (0)
205#endif
206
207/**
208 * Exception bitmap mask for all contributory exceptions.
209 *
210 * Page fault is deliberately excluded here as it's conditional as to whether
211 * it's contributory or benign. Page faults are handled separately.
212 */
213#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
214 | RT_BIT(X86_XCPT_DE))
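
/*
 * Illustrative sketch (fragment; the variable names are hypothetical): this
 * mask is what turns two back-to-back contributory exceptions into a double
 * fault during the event-delivery check, roughly:
 *
 * @code
 *     // uPrevXcpt = exception being delivered when the exit happened, uCurXcpt = new exception.
 *     if (   (HMSVM_CONTRIBUTORY_XCPT_MASK & RT_BIT(uPrevXcpt))
 *         && (HMSVM_CONTRIBUTORY_XCPT_MASK & RT_BIT(uCurXcpt)))
 *         rc = VINF_HM_DOUBLE_FAULT;    // Inject #DF instead of the second exception.
 * @endcode
 */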
215
216/**
217 * Mandatory/unconditional guest control intercepts.
218 *
219 * SMIs can and do happen in normal operation. We need not intercept them
220 * while executing the guest (or nested-guest).
221 */
222#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS ( SVM_CTRL_INTERCEPT_INTR \
223 | SVM_CTRL_INTERCEPT_NMI \
224 | SVM_CTRL_INTERCEPT_INIT \
225 | SVM_CTRL_INTERCEPT_RDPMC \
226 | SVM_CTRL_INTERCEPT_CPUID \
227 | SVM_CTRL_INTERCEPT_RSM \
228 | SVM_CTRL_INTERCEPT_HLT \
229 | SVM_CTRL_INTERCEPT_IOIO_PROT \
230 | SVM_CTRL_INTERCEPT_MSR_PROT \
231 | SVM_CTRL_INTERCEPT_INVLPGA \
232 | SVM_CTRL_INTERCEPT_SHUTDOWN \
233 | SVM_CTRL_INTERCEPT_FERR_FREEZE \
234 | SVM_CTRL_INTERCEPT_VMRUN \
235 | SVM_CTRL_INTERCEPT_SKINIT \
236 | SVM_CTRL_INTERCEPT_WBINVD \
237 | SVM_CTRL_INTERCEPT_MONITOR \
238 | SVM_CTRL_INTERCEPT_MWAIT \
239 | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
240 | SVM_CTRL_INTERCEPT_XSETBV)
241
242/** @name VMCB Clean Bits.
243 *
244 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
245 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
246 * memory.
247 *
248 * @{ */
249/** All intercepts vectors, TSC offset, PAUSE filter counter. */
250#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
251/** I/O permission bitmap, MSR permission bitmap. */
252#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
253/** ASID. */
254#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
255/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
256 *  V_INTR_VECTOR. */
257#define HMSVM_VMCB_CLEAN_INT_CTRL RT_BIT(3)
258/** Nested Paging: Nested CR3 (nCR3), PAT. */
259#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
260/** Control registers (CR0, CR3, CR4, EFER). */
261#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
262/** Debug registers (DR6, DR7). */
263#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
264/** GDT, IDT limit and base. */
265#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
266/** Segment register: CS, SS, DS, ES limit and base. */
267#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
268/** CR2.*/
269#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
270/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
271#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
272/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
273 *  PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
274#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
275/** Mask of all valid VMCB Clean bits. */
276#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
277 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
278 | HMSVM_VMCB_CLEAN_ASID \
279 | HMSVM_VMCB_CLEAN_INT_CTRL \
280 | HMSVM_VMCB_CLEAN_NP \
281 | HMSVM_VMCB_CLEAN_CRX_EFER \
282 | HMSVM_VMCB_CLEAN_DRX \
283 | HMSVM_VMCB_CLEAN_DT \
284 | HMSVM_VMCB_CLEAN_SEG \
285 | HMSVM_VMCB_CLEAN_CR2 \
286 | HMSVM_VMCB_CLEAN_LBR \
287 | HMSVM_VMCB_CLEAN_AVIC)
288/** @} */
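
/*
 * Illustrative sketch: whenever ring-0 code modifies a VMCB field covered by
 * one of the clean bits above, that bit must be cleared so the CPU reloads the
 * field from memory on the next VMRUN, e.g.:
 *
 * @code
 *     pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;        // Change an intercept...
 *     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;    // ...and mark it dirty.
 * @endcode
 */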
289
290/**
291 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
292 */
293typedef enum SVMMSREXITREAD
294{
295 /** Reading this MSR causes a \#VMEXIT. */
296 SVMMSREXIT_INTERCEPT_READ = 0xb,
297 /** Reading this MSR does not cause a \#VMEXIT. */
298 SVMMSREXIT_PASSTHRU_READ
299} SVMMSREXITREAD;
300
301/**
302 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
303 */
304typedef enum SVMMSREXITWRITE
305{
306 /** Writing to this MSR causes a \#VMEXIT. */
307 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
308 /** Writing to this MSR does not cause a \#VMEXIT. */
309 SVMMSREXIT_PASSTHRU_WRITE
310} SVMMSREXITWRITE;
311
312/**
313 * SVM \#VMEXIT handler.
314 *
315 * @returns Strict VBox status code.
316 * @param pVCpu The cross context virtual CPU structure.
317 * @param pSvmTransient Pointer to the SVM-transient structure.
318 */
319typedef VBOXSTRICTRC FNSVMEXITHANDLER(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
320
321
322/*********************************************************************************************************************************
323* Internal Functions *
324*********************************************************************************************************************************/
325static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu);
326static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState);
327
328
329/** @name \#VMEXIT handlers.
330 * @{
331 */
332static FNSVMEXITHANDLER hmR0SvmExitIntr;
333static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
334static FNSVMEXITHANDLER hmR0SvmExitInvd;
335static FNSVMEXITHANDLER hmR0SvmExitCpuid;
336static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
337static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
338static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
339static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
340static FNSVMEXITHANDLER hmR0SvmExitHlt;
341static FNSVMEXITHANDLER hmR0SvmExitMonitor;
342static FNSVMEXITHANDLER hmR0SvmExitMwait;
343static FNSVMEXITHANDLER hmR0SvmExitShutdown;
344static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
345static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
346static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
347static FNSVMEXITHANDLER hmR0SvmExitMsr;
348static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
349static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
350static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
351static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
352static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
353static FNSVMEXITHANDLER hmR0SvmExitVIntr;
354static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
355static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
356static FNSVMEXITHANDLER hmR0SvmExitPause;
357static FNSVMEXITHANDLER hmR0SvmExitFerrFreeze;
358static FNSVMEXITHANDLER hmR0SvmExitIret;
359static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
360static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
361static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
362static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
363static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
364static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
365static FNSVMEXITHANDLER hmR0SvmExitXcptGP;
366static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
367static FNSVMEXITHANDLER hmR0SvmExitSwInt;
368static FNSVMEXITHANDLER hmR0SvmExitTrRead;
369static FNSVMEXITHANDLER hmR0SvmExitTrWrite;
370#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
371static FNSVMEXITHANDLER hmR0SvmExitClgi;
372static FNSVMEXITHANDLER hmR0SvmExitStgi;
373static FNSVMEXITHANDLER hmR0SvmExitVmload;
374static FNSVMEXITHANDLER hmR0SvmExitVmsave;
375static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
376static FNSVMEXITHANDLER hmR0SvmExitVmrun;
377static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
378static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
379#endif
380/** @} */
381
382static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
383#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
384static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
385#endif
386static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops);
387
388
389/*********************************************************************************************************************************
390* Global Variables *
391*********************************************************************************************************************************/
392/** Ring-0 memory object for the IO bitmap. */
393static RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
394/** Physical address of the IO bitmap. */
395static RTHCPHYS g_HCPhysIOBitmap;
396/** Pointer to the IO bitmap. */
397static R0PTRTYPE(void *) g_pvIOBitmap;
398
399#ifdef VBOX_STRICT
400# define HMSVM_LOG_RBP_RSP RT_BIT_32(0)
401# define HMSVM_LOG_CR_REGS RT_BIT_32(1)
402# define HMSVM_LOG_CS RT_BIT_32(2)
403# define HMSVM_LOG_SS RT_BIT_32(3)
404# define HMSVM_LOG_FS RT_BIT_32(4)
405# define HMSVM_LOG_GS RT_BIT_32(5)
406# define HMSVM_LOG_LBR RT_BIT_32(6)
407# define HMSVM_LOG_ALL ( HMSVM_LOG_RBP_RSP \
408 | HMSVM_LOG_CR_REGS \
409 | HMSVM_LOG_CS \
410 | HMSVM_LOG_SS \
411 | HMSVM_LOG_FS \
412 | HMSVM_LOG_GS \
413 | HMSVM_LOG_LBR)
414
415/**
416 * Dumps virtual CPU state and additional info. to the logger for diagnostics.
417 *
418 * @param pVCpu The cross context virtual CPU structure.
419 * @param pVmcb Pointer to the VM control block.
420 * @param pszPrefix Log prefix.
421 * @param fFlags Log flags, see HMSVM_LOG_XXX.
422 * @param uVerbose The verbosity level, currently unused.
423 */
424static void hmR0SvmLogState(PVMCPUCC pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose)
425{
426 RT_NOREF2(pVCpu, uVerbose);
427 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
428
429 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
430 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));
431
432 if (fFlags & HMSVM_LOG_RBP_RSP)
433 {
434 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
435 Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
436 }
437
438 if (fFlags & HMSVM_LOG_CR_REGS)
439 {
440 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
441 Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
442 }
443
444 if (fFlags & HMSVM_LOG_CS)
445 {
446 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
447 Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
448 pCtx->cs.u32Limit, pCtx->cs.Attr.u));
449 }
450 if (fFlags & HMSVM_LOG_SS)
451 {
452 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
453 Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
454 pCtx->ss.u32Limit, pCtx->ss.Attr.u));
455 }
456 if (fFlags & HMSVM_LOG_FS)
457 {
458 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
459 Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
460 pCtx->fs.u32Limit, pCtx->fs.Attr.u));
461 }
462 if (fFlags & HMSVM_LOG_GS)
463 {
464 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
465 Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
466 pCtx->gs.u32Limit, pCtx->gs.Attr.u));
467 }
468
469 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
470 if (fFlags & HMSVM_LOG_LBR)
471 {
472 Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
473 pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
474 }
475 NOREF(pszPrefix); NOREF(pVmcbGuest); NOREF(pCtx);
476}
477#endif /* VBOX_STRICT */
478
479
480/**
481 * Sets up and activates AMD-V on the current CPU.
482 *
483 * @returns VBox status code.
484 * @param pHostCpu The HM physical-CPU structure.
485 * @param pVM The cross context VM structure. Can be
486 * NULL after a resume!
487 * @param pvCpuPage Pointer to the global CPU page.
488 * @param HCPhysCpuPage Physical address of the global CPU page.
489 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
490 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs (currently
491 * unused).
492 */
493VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
494 PCSUPHWVIRTMSRS pHwvirtMsrs)
495{
496 Assert(!fEnabledByHost);
497 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
498 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
499 Assert(pvCpuPage); NOREF(pvCpuPage);
500 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
501
502 RT_NOREF2(fEnabledByHost, pHwvirtMsrs);
503
504 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
505 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
506
507 /*
508 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
509 */
510 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
511 if (u64HostEfer & MSR_K6_EFER_SVME)
512 {
513 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
514 if ( pVM
515 && pVM->hm.s.svm.fIgnoreInUseError)
516 pHostCpu->fIgnoreAMDVInUseError = true;
517
518 if (!pHostCpu->fIgnoreAMDVInUseError)
519 {
520 ASMSetFlags(fEFlags);
521 return VERR_SVM_IN_USE;
522 }
523 }
524
525 /* Turn on AMD-V in the EFER MSR. */
526 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
527
528 /* Write the physical page address where the CPU will store the host state while executing the VM. */
529 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
530
531 /* Restore interrupts. */
532 ASMSetFlags(fEFlags);
533
534 /*
535 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all
536 * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all
537 * ASIDs (flushing is done upon VMRUN). Therefore, flag that we need to flush the TLB
538 * entirely before executing any guest code.
539 */
540 pHostCpu->fFlushAsidBeforeUse = true;
541
542 /*
543 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
544 */
545 ++pHostCpu->cTlbFlushes;
546
547 return VINF_SUCCESS;
548}
549
550
551/**
552 * Deactivates AMD-V on the current CPU.
553 *
554 * @returns VBox status code.
555 * @param pHostCpu The HM physical-CPU structure.
556 * @param pvCpuPage Pointer to the global CPU page.
557 * @param HCPhysCpuPage Physical address of the global CPU page.
558 */
559VMMR0DECL(int) SVMR0DisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
560{
561 RT_NOREF1(pHostCpu);
562 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
563 AssertReturn( HCPhysCpuPage
564 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
565 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
566
567 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
568 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
569
570 /* Turn off AMD-V in the EFER MSR. */
571 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
572 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
573
574 /* Invalidate host state physical address. */
575 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
576
577 /* Restore interrupts. */
578 ASMSetFlags(fEFlags);
579
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Does global AMD-V initialization (called during module initialization).
586 *
587 * @returns VBox status code.
588 */
589VMMR0DECL(int) SVMR0GlobalInit(void)
590{
591 /*
592 * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
593 * intercept all IO accesses, it's done once globally here instead of per-VM.
594 */
595 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
596 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
597 if (RT_FAILURE(rc))
598 return rc;
599
600 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
601 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
602
603 /* Set all bits to intercept all IO accesses. */
604 ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
605
606 return VINF_SUCCESS;
607}
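
/*
 * Illustrative sketch (hypothetical; VirtualBox keeps all bits set): the IOPM
 * uses one intercept bit per I/O port, so letting a guest access a port
 * directly would mean clearing byte (uPort / 8), bit (uPort % 8):
 *
 * @code
 *     uint16_t const uPort      = 0x64;                     // Example port.
 *     uint8_t       *pbIoBitmap = (uint8_t *)g_pvIOBitmap;
 *     pbIoBitmap[uPort / 8] &= ~(uint8_t)RT_BIT(uPort % 8);
 * @endcode
 */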
608
609
610/**
611 * Does global AMD-V termination (called during module termination).
612 */
613VMMR0DECL(void) SVMR0GlobalTerm(void)
614{
615 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
616 {
617 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
618 g_pvIOBitmap = NULL;
619 g_HCPhysIOBitmap = 0;
620 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
621 }
622}
623
624
625/**
626 * Frees any allocated per-VCPU structures for a VM.
627 *
628 * @param pVM The cross context VM structure.
629 */
630DECLINLINE(void) hmR0SvmFreeStructs(PVMCC pVM)
631{
632 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
633 {
634 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
635 AssertPtr(pVCpu);
636
637 if (pVCpu->hmr0.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
638 {
639 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcbHost, false);
640 pVCpu->hmr0.s.svm.HCPhysVmcbHost = 0;
641 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
642 }
643
644 if (pVCpu->hmr0.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
645 {
646 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcb, false);
647 pVCpu->hmr0.s.svm.pVmcb = NULL;
648 pVCpu->hmr0.s.svm.HCPhysVmcb = 0;
649 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
650 }
651
652 if (pVCpu->hmr0.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
653 {
654 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, false);
655 pVCpu->hmr0.s.svm.pvMsrBitmap = NULL;
656 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = 0;
657 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
658 }
659 }
660}
661
662
663/**
664 * Sets pfnVMRun to the best suited variant.
665 *
666 * This must be called whenever anything changes relative to the SVMR0VMRun
667 * variant selection:
668 * - pVCpu->hm.s.fLoadSaveGuestXcr0
669 * - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
670 * - CPUMCTX_WSF_IBPB_EXIT in pVCpu->cpum.GstCtx.fWorldSwitcher
671 * - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
672 * - Perhaps: CPUMCTX.fXStateMask (windows only)
673 *
674 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
675 * CPUMCTX_WSF_IBPB_EXIT can be changed at runtime.
676 */
677static void hmR0SvmUpdateVmRunFunction(PVMCPUCC pVCpu)
678{
679 static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
680 {
681 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
682 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
683 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
684 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
685 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
686 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
687 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
688 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
689 };
690 uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
691 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
692 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 4 : 0);
693 PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
694 if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
695 pVCpu->hmr0.s.svm.pfnVMRun = pfnVMRun;
696}
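
/*
 * Illustrative note: the table index is a 3-bit encoding of the conditions
 * above, so e.g. a VCPU that needs XCR0 load/save and an IBPB on entry, but no
 * IBPB on exit, picks entry 3:
 *
 * @code
 *     // Bit 0 = fLoadSaveGuestXcr0, bit 1 = HM_WSF_IBPB_ENTRY, bit 2 = HM_WSF_IBPB_EXIT.
 *     uintptr_t const idx = 1 | 2;   // == 3 -> hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit
 * @endcode
 */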
697
698
699/**
700 * Selector FNHMSVMVMRUN implementation.
701 */
702static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
703{
704 hmR0SvmUpdateVmRunFunction(pVCpu);
705 return pVCpu->hmr0.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
706}
707
708
709/**
710 * Does per-VM AMD-V initialization.
711 *
712 * @returns VBox status code.
713 * @param pVM The cross context VM structure.
714 */
715VMMR0DECL(int) SVMR0InitVM(PVMCC pVM)
716{
717 int rc = VERR_INTERNAL_ERROR_5;
718
719 /*
720 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
721 */
722 uint32_t u32Family;
723 uint32_t u32Model;
724 uint32_t u32Stepping;
725 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
726 {
727 Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
728 pVM->hmr0.s.svm.fAlwaysFlushTLB = true;
729 }
730
731 /*
732 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
733 */
734 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
735 {
736 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
737 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
738 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
739 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
740 }
741
742 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
743 {
744 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
745
746 /*
747 * Initialize the hardware-assisted SVM guest-execution handler.
748 * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
749 */
750 pVCpu->hmr0.s.svm.pfnVMRun = hmR0SvmVMRunSelector;
751
752 /*
753 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
754 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
755 */
756/** @todo Does this need to be below 4G? */
757 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << HOST_PAGE_SHIFT, false /* fExecutable */);
758 if (RT_FAILURE(rc))
759 goto failure_cleanup;
760
761 void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcbHost);
762 pVCpu->hmr0.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcbHost, 0 /* iPage */);
763 Assert(pVCpu->hmr0.s.svm.HCPhysVmcbHost < _4G);
764 RT_BZERO(pvVmcbHost, HOST_PAGE_SIZE);
765
766 /*
767 * Allocate one page for the guest-state VMCB.
768 */
769/** @todo Does this need to be below 4G? */
770 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << HOST_PAGE_SHIFT, false /* fExecutable */);
771 if (RT_FAILURE(rc))
772 goto failure_cleanup;
773
774 pVCpu->hmr0.s.svm.pVmcb = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcb);
775 pVCpu->hmr0.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcb, 0 /* iPage */);
776 Assert(pVCpu->hmr0.s.svm.HCPhysVmcb < _4G);
777 RT_BZERO(pVCpu->hmr0.s.svm.pVmcb, HOST_PAGE_SIZE);
778
779 /*
780 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
781 * SVM to not require one.
782 */
783/** @todo Does this need to be below 4G? */
784 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT,
785 false /* fExecutable */);
786 if (RT_FAILURE(rc))
787 goto failure_cleanup;
788
789 pVCpu->hmr0.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjMsrBitmap);
790 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
791 /* Set all bits to intercept all MSR accesses (changed later on). */
792 ASMMemFill32(pVCpu->hmr0.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT, UINT32_C(0xffffffff));
793 }
794
795 return VINF_SUCCESS;
796
797failure_cleanup:
798 hmR0SvmFreeStructs(pVM);
799 return rc;
800}
801
802
803/**
804 * Does per-VM AMD-V termination.
805 *
806 * @returns VBox status code.
807 * @param pVM The cross context VM structure.
808 */
809VMMR0DECL(int) SVMR0TermVM(PVMCC pVM)
810{
811 hmR0SvmFreeStructs(pVM);
812 return VINF_SUCCESS;
813}
814
815
816/**
817 * Returns whether the VMCB Clean Bits feature is supported.
818 *
819 * @returns @c true if supported, @c false otherwise.
820 * @param pVCpu The cross context virtual CPU structure.
821 * @param fIsNestedGuest Whether we are currently executing the nested-guest.
822 */
823DECL_FORCE_INLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPUCC pVCpu, bool fIsNestedGuest)
824{
825 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
826 bool const fHostVmcbCleanBits = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
827 if (!fIsNestedGuest)
828 return fHostVmcbCleanBits;
829 return fHostVmcbCleanBits && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
830}
831
832
833/**
834 * Returns whether the decode assists feature is supported.
835 *
836 * @returns @c true if supported, @c false otherwise.
837 * @param pVCpu The cross context virtual CPU structure.
838 */
839DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPUCC pVCpu)
840{
841 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
842#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
843 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
844 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
845 && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
846#endif
847 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
848}
849
850
851/**
852 * Returns whether the NRIP_SAVE feature is supported.
853 *
854 * @returns @c true if supported, @c false otherwise.
855 * @param pVCpu The cross context virtual CPU structure.
856 */
857DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPUCC pVCpu)
858{
859 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
860#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
861 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
862 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
863 && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
864#endif
865 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
866}
867
868
869/**
870 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
871 *
872 * @param pVCpu The cross context virtual CPU structure.
873 * @param pbMsrBitmap Pointer to the MSR bitmap.
874 * @param idMsr The MSR for which the permissions are being set.
875 * @param enmRead MSR read permissions.
876 * @param enmWrite MSR write permissions.
877 *
878 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
879 * caller needs to take care of this.
880 */
881static void hmR0SvmSetMsrPermission(PVMCPUCC pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
882 SVMMSREXITWRITE enmWrite)
883{
884 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
885 uint16_t offMsrpm;
886 uint8_t uMsrpmBit;
887 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
888 AssertRC(rc);
889
890 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
891 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
892
893 pbMsrBitmap += offMsrpm;
894 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
895 *pbMsrBitmap |= RT_BIT(uMsrpmBit);
896 else
897 {
898 if (!fInNestedGuestMode)
899 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
900#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
901 else
902 {
903 /* Only clear the bit if the nested-guest is also not intercepting the MSR read.*/
904 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit)))
905 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
906 else
907 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit));
908 }
909#endif
910 }
911
912 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
913 *pbMsrBitmap |= RT_BIT(uMsrpmBit + 1);
914 else
915 {
916 if (!fInNestedGuestMode)
917 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
918#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
919 else
920 {
921 /* Only clear the bit if the nested-guest is also not intercepting the MSR write.*/
922 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit + 1)))
923 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
924 else
925 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
926 }
927#endif
928 }
929}
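
/*
 * Standalone sketch of the MSRPM layout (simplified, based on the AMD manuals;
 * the function name is hypothetical and mirrors what CPUMGetSvmMsrpmOffsetAndBit
 * computes): every MSR gets two adjacent bits, the even bit intercepting reads
 * and the odd bit intercepting writes.
 *
 * @code
 *     static bool svmSketchMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *poffMsrpm, uint8_t *puBit)
 *     {
 *         uint32_t uMsrBase, offBitmap;
 *         if (idMsr <= UINT32_C(0x00001fff))
 *         {   uMsrBase = UINT32_C(0x00000000); offBitmap = 0x0000; }
 *         else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
 *         {   uMsrBase = UINT32_C(0xc0000000); offBitmap = 0x0800; }
 *         else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
 *         {   uMsrBase = UINT32_C(0xc0010000); offBitmap = 0x1000; }
 *         else
 *             return false;                              // MSR not covered; always intercepted.
 *         uint32_t const iBit = (idMsr - uMsrBase) * 2;  // Two bits per MSR: read bit, write bit.
 *         *poffMsrpm = (uint16_t)(offBitmap + iBit / 8);
 *         *puBit     = (uint8_t)(iBit % 8);
 *         return true;
 *     }
 * @endcode
 */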
930
931
932/**
933 * Sets up AMD-V for the specified VM.
934 * This function is only called once per-VM during initialization.
935 *
936 * @returns VBox status code.
937 * @param pVM The cross context VM structure.
938 */
939VMMR0DECL(int) SVMR0SetupVM(PVMCC pVM)
940{
941 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
942 AssertReturn(pVM, VERR_INVALID_PARAMETER);
943
944 /*
945 * Validate and copy over some parameters.
946 */
947 AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
948 bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
949 AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
950 pVM->hmr0.s.fNestedPaging = fNestedPaging;
951 pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
952
953 /*
954 * Determine some configuration parameters.
955 */
956 bool const fPauseFilter = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
957 bool const fPauseFilterThreshold = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
958 bool const fUsePauseFilter = fPauseFilter && pVM->hm.s.svm.cPauseFilter;
959
960 bool const fLbrVirt = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
961 bool const fUseLbrVirt = fLbrVirt && pVM->hm.s.svm.fLbrVirt; /** @todo IEM implementation etc. */
962
963#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
964 bool const fVirtVmsaveVmload = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
965 bool const fUseVirtVmsaveVmload = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && fNestedPaging;
966
967 bool const fVGif = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
968 bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
969#endif
970
971 PVMCPUCC pVCpu0 = VMCC_GET_CPU_0(pVM);
972 PSVMVMCB pVmcb0 = pVCpu0->hmr0.s.svm.pVmcb;
973 AssertMsgReturn(RT_VALID_PTR(pVmcb0), ("Invalid pVmcb (%p) for vcpu[0]\n", pVmcb0), VERR_SVM_INVALID_PVMCB);
974 PSVMVMCBCTRL pVmcbCtrl0 = &pVmcb0->ctrl;
975
976 /* Always trap #AC for reasons of security. */
977 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
978
979 /* Always trap #DB for reasons of security. */
980 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
981
982 /* Trap exceptions unconditionally (debug purposes). */
983#ifdef HMSVM_ALWAYS_TRAP_PF
984 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_PF);
985#endif
986#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
987 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
988 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_BP)
989 | RT_BIT_32(X86_XCPT_DE)
990 | RT_BIT_32(X86_XCPT_NM)
991 | RT_BIT_32(X86_XCPT_UD)
992 | RT_BIT_32(X86_XCPT_NP)
993 | RT_BIT_32(X86_XCPT_SS)
994 | RT_BIT_32(X86_XCPT_GP)
995 | RT_BIT_32(X86_XCPT_PF)
996 | RT_BIT_32(X86_XCPT_MF)
997 ;
998#endif
999
1000 /* Apply the exceptions intercepts needed by the GIM provider. */
1001 if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1002 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
1003
1004 /* The mesa 3d driver hack needs #GP. */
1005 if (pVCpu0->hm.s.fTrapXcptGpForLovelyMesaDrv)
1006 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_GP);
1007
1008 /* Set up unconditional intercepts and conditions. */
1009 pVmcbCtrl0->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
1010 | SVM_CTRL_INTERCEPT_VMMCALL
1011 | SVM_CTRL_INTERCEPT_VMSAVE
1012 | SVM_CTRL_INTERCEPT_VMLOAD
1013 | SVM_CTRL_INTERCEPT_CLGI
1014 | SVM_CTRL_INTERCEPT_STGI;
1015
1016#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
1017 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
1018#endif
1019
1020#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1021 if (pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm)
1022 {
1023 /* Virtualized VMSAVE/VMLOAD. */
1024 if (fUseVirtVmsaveVmload)
1025 {
1026 pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload = 1;
1027 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_VMSAVE
1028 | SVM_CTRL_INTERCEPT_VMLOAD);
1029 }
1030 else
1031 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1032
1033 /* Virtual GIF. */
1034 if (fUseVGif)
1035 {
1036 pVmcbCtrl0->IntCtrl.n.u1VGifEnable = 1;
1037 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_CLGI
1038 | SVM_CTRL_INTERCEPT_STGI);
1039 }
1040 else
1041 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1042 }
1043 else
1044#endif
1045 {
1046 Assert(!pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm);
1047 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1048 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1049 }
1050
1051 /* CR4 writes must always be intercepted for tracking PGM mode changes. */
1052 pVmcbCtrl0->u16InterceptWrCRx = RT_BIT(4);
1053
1054 /* Intercept all DRx reads and writes by default. Changed later on. */
1055 pVmcbCtrl0->u16InterceptRdDRx = 0xffff;
1056 pVmcbCtrl0->u16InterceptWrDRx = 0xffff;
1057
1058 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
1059 pVmcbCtrl0->IntCtrl.n.u1VIntrMasking = 1;
1060
1061 /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
1062 as we currently deliver both PIC and APIC interrupts alike; see hmR0SvmEvaluatePendingEvent(). */
1063 pVmcbCtrl0->IntCtrl.n.u1IgnoreTPR = 1;
1064
1065 /* Set the IO permission bitmap physical addresses. */
1066 pVmcbCtrl0->u64IOPMPhysAddr = g_HCPhysIOBitmap;
1067
1068 /* LBR virtualization. */
1069 pVmcbCtrl0->LbrVirt.n.u1LbrVirt = fUseLbrVirt;
1070
1071 /* The host ASID MBZ; for the guest, start with ASID 1. */
1072 pVmcbCtrl0->TLBCtrl.n.u32ASID = 1;
1073
1074 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
1075 pVmcbCtrl0->NestedPagingCtrl.n.u1NestedPaging = fNestedPaging;
1076
1077 /* Without Nested Paging, we need additional intercepts. */
1078 if (!fNestedPaging)
1079 {
1080 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
1081 pVmcbCtrl0->u16InterceptRdCRx |= RT_BIT(3);
1082 pVmcbCtrl0->u16InterceptWrCRx |= RT_BIT(3);
1083
1084 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
1085 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
1086 | SVM_CTRL_INTERCEPT_TASK_SWITCH;
1087
1088 /* Page faults must be intercepted to implement shadow paging. */
1089 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
1090 }
1091
1092 /* Workaround for missing OS/2 TLB flush, see ticketref:20625. */
1093 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
1094 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TR_WRITES;
1095
1096 /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
1097 if (fUsePauseFilter)
1098 {
1099 Assert(pVM->hm.s.svm.cPauseFilter > 0);
1100 pVmcbCtrl0->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
1101 if (fPauseFilterThreshold)
1102 pVmcbCtrl0->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
1103 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
1104 }
1105
1106 /*
1107 * Setup the MSR permission bitmap.
1108 * The following MSRs are saved/restored automatically during the world-switch.
1109 * Don't intercept guest read/write accesses to these MSRs.
1110 */
1111 uint8_t *pbMsrBitmap0 = (uint8_t *)pVCpu0->hmr0.s.svm.pvMsrBitmap;
1112 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1113 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1114 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1115 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1116 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1117 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1118 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1119 if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1120 {
1121 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1122 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1123 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1124 }
1125 else
1126 {
1127 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1128 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1129 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1130 }
1131 pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hmr0.s.svm.HCPhysMsrBitmap;
1132
1133 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1134 Assert(pVmcbCtrl0->u32VmcbCleanBits == 0);
1135
1136 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
1137 {
1138 PVMCPUCC pVCpuCur = VMCC_GET_CPU(pVM, idCpu);
1139 PSVMVMCB pVmcbCur = pVCpuCur->hmr0.s.svm.pVmcb;
1140 AssertMsgReturn(RT_VALID_PTR(pVmcbCur), ("Invalid pVmcb (%p) for vcpu[%u]\n", pVmcbCur, idCpu), VERR_SVM_INVALID_PVMCB);
1141 PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;
1142
1143 /* Copy the VMCB control area. */
1144 memcpy(pVmcbCtrlCur, pVmcbCtrl0, sizeof(*pVmcbCtrlCur));
1145
1146 /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
1147 uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hmr0.s.svm.pvMsrBitmap;
1148 memcpy(pbMsrBitmapCur, pbMsrBitmap0, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1149 pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hmr0.s.svm.HCPhysMsrBitmap;
1150
1151 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1152 Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
1153
1154 /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
1155 Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu0->hm.s.fGIMTrapXcptUD);
1156 }
1157
1158#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1159 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool fUseVGif=%RTbool fUseVirtVmsaveVmload=%RTbool\n", fUsePauseFilter,
1160 fUseLbrVirt, fUseVGif, fUseVirtVmsaveVmload));
1161#else
1162 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool\n", fUsePauseFilter, fUseLbrVirt));
1163#endif
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/**
1169 * Gets a pointer to the currently active guest (or nested-guest) VMCB.
1170 *
1171 * @returns Pointer to the current context VMCB.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 */
1174DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPUCC pVCpu)
1175{
1176#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1177 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1178 return &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
1179#endif
1180 return pVCpu->hmr0.s.svm.pVmcb;
1181}
1182
1183
1184/**
1185 * Gets a pointer to the nested-guest VMCB cache.
1186 *
1187 * @returns Pointer to the nested-guest VMCB cache.
1188 * @param pVCpu The cross context virtual CPU structure.
1189 */
1190DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPUCC pVCpu)
1191{
1192#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1193 Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
1194 return &pVCpu->hm.s.svm.NstGstVmcbCache;
1195#else
1196 RT_NOREF(pVCpu);
1197 return NULL;
1198#endif
1199}
1200
1201
1202/**
1203 * Invalidates a guest page by guest virtual address.
1204 *
1205 * @returns VBox status code.
1206 * @param pVCpu The cross context virtual CPU structure.
1207 * @param GCVirt Guest virtual address of the page to invalidate.
1208 */
1209VMMR0DECL(int) SVMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
1210{
1211 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
1212
1213 bool const fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH) || pVCpu->CTX_SUFF(pVM)->hmr0.s.svm.fAlwaysFlushTLB;
1214
1215 /* Skip it if a TLB flush is already pending. */
1216 if (!fFlushPending)
1217 {
1218 Log4Func(("%#RGv\n", GCVirt));
1219
1220 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
1221 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
1222
1223 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
1224 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1225 }
1226 return VINF_SUCCESS;
1227}
1228
1229
1230/**
1231 * Flushes the appropriate tagged-TLB entries.
1232 *
1233 * @param pHostCpu The HM physical-CPU structure.
1234 * @param pVCpu The cross context virtual CPU structure.
1235 * @param pVmcb Pointer to the VM control block.
1236 */
1237static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1238{
1239 /*
1240 * Force a TLB flush for the first world switch if the current CPU differs from the one
1241 * we ran on last. This can happen both for start & resume due to long jumps back to
1242 * ring-3.
1243 *
1244 * We also force a TLB flush every time when executing a nested-guest VCPU as there is no
1245 * correlation between it and the physical CPU.
1246 *
1247 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while
1248 * flushing the TLB, so we cannot reuse the ASIDs without flushing.
1249 */
1250 bool fNewAsid = false;
1251 Assert(pHostCpu->idCpu != NIL_RTCPUID);
1252 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
1253 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes
1254#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1255 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
1256#endif
1257 )
1258 {
1259 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1260 pVCpu->hmr0.s.fForceTLBFlush = true;
1261 fNewAsid = true;
1262 }
1263
1264 /* Set TLB flush state as checked until we return from the world switch. */
1265 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
1266
1267 /* Check for explicit TLB flushes. */
1268 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1269 {
1270 pVCpu->hmr0.s.fForceTLBFlush = true;
1271 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1272 }
1273
1274 /*
1275 * If the AMD CPU has erratum 170, we need to flush the entire TLB for each world switch. Sad.
1276 * This host CPU requirement takes precedence.
1277 */
1278 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1279 if (pVM->hmr0.s.svm.fAlwaysFlushTLB)
1280 {
1281 pHostCpu->uCurrentAsid = 1;
1282 pVCpu->hmr0.s.uCurrentAsid = 1;
1283 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1284 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1285 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1286
1287 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1288 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1289 }
1290 else
1291 {
1292 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
1293 if (pVCpu->hmr0.s.fForceTLBFlush)
1294 {
1295 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1296 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1297
1298 if (fNewAsid)
1299 {
1300 ++pHostCpu->uCurrentAsid;
1301
1302 bool fHitASIDLimit = false;
1303 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
1304 {
1305 pHostCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
1306 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
1307 fHitASIDLimit = true;
1308 }
1309
1310 if ( fHitASIDLimit
1311 || pHostCpu->fFlushAsidBeforeUse)
1312 {
1313 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1314 pHostCpu->fFlushAsidBeforeUse = false;
1315 }
1316
1317 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
1318 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1319 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1320 }
1321 else
1322 {
1323 if (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
1324 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
1325 else
1326 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1327 }
1328
1329 pVCpu->hmr0.s.fForceTLBFlush = false;
1330 }
1331 }
1332
1333 /* Update VMCB with the ASID. */
1334 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hmr0.s.uCurrentAsid)
1335 {
1336 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hmr0.s.uCurrentAsid;
1337 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
1338 }
1339
1340 AssertMsg(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu,
1341 ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hmr0.s.idLastCpu, pHostCpu->idCpu));
1342 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
1343 ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
1344 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
1345 ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
1346 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
1347 ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
1348
1349#ifdef VBOX_WITH_STATISTICS
1350 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
1351 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1352 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
1353 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
1354 {
1355 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1356 }
1357 else
1358 {
1359 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1360 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1361 }
1362#endif
1363}
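
/*
 * Simplified sketch of the per-host-CPU ASID policy above (the helper name is
 * hypothetical): ASIDs 1 to g_uHmMaxAsid - 1 are handed out in order; wrapping
 * around bumps cTlbFlushes so every VCPU that subsequently runs on this CPU is
 * forced onto a fresh ASID with a full TLB flush.
 *
 * @code
 *     static uint32_t svmSketchNextAsid(PHMPHYSCPU pHostCpu, bool *pfFlushEntireTlb)
 *     {
 *         *pfFlushEntireTlb = pHostCpu->fFlushAsidBeforeUse;
 *         if (++pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
 *         {
 *             pHostCpu->uCurrentAsid = 1;      // ASID 0 is reserved for the host.
 *             pHostCpu->cTlbFlushes++;         // Invalidates every VCPU's cached ASID.
 *             *pfFlushEntireTlb = true;
 *         }
 *         return pHostCpu->uCurrentAsid;
 *     }
 * @endcode
 */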
1364
1365
1366/**
1367 * Sets an exception intercept in the specified VMCB.
1368 *
1369 * @param pVmcb Pointer to the VM control block.
1370 * @param uXcpt The exception (X86_XCPT_*).
1371 */
1372DECLINLINE(void) hmR0SvmSetXcptIntercept(PSVMVMCB pVmcb, uint8_t uXcpt)
1373{
1374 if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt)))
1375 {
1376 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
1377 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1378 }
1379}
1380
1381
1382/**
1383 * Clears an exception intercept in the specified VMCB.
1384 *
1385 * @param pVCpu The cross context virtual CPU structure.
1386 * @param pVmcb Pointer to the VM control block.
1387 * @param uXcpt The exception (X86_XCPT_*).
1388 *
1389 * @remarks This takes into account if we're executing a nested-guest and only
1390 * removes the exception intercept if both the guest -and- nested-guest
1391 * are not intercepting it.
1392 */
1393DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt)
1394{
1395 Assert(uXcpt != X86_XCPT_DB);
1396 Assert(uXcpt != X86_XCPT_AC);
1397 Assert(uXcpt != X86_XCPT_GP);
1398#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1399 if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt))
1400 {
1401 bool fRemove = true;
1402# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1403 /* Only remove the intercept if the nested-guest is also not intercepting it! */
1404 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1405 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1406 {
1407 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1408 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
1409 }
1410# else
1411 RT_NOREF(pVCpu);
1412# endif
1413 if (fRemove)
1414 {
1415 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(uXcpt);
1416 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1417 }
1418 }
1419#else
1420 RT_NOREF3(pVCpu, pVmcb, uXcpt);
1421#endif
1422}
1423
1424
1425/**
1426 * Sets a control intercept in the specified VMCB.
1427 *
1428 * @param pVmcb Pointer to the VM control block.
1429 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1430 */
1431DECLINLINE(void) hmR0SvmSetCtrlIntercept(PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1432{
1433 if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
1434 {
1435 pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
1436 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1437 }
1438}
1439
1440
1441/**
1442 * Clears a control intercept in the specified VMCB.
1443 *
1444 * @returns @c true if the intercept is still set, @c false otherwise.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmcb Pointer to the VM control block.
1447 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1448 *
1449 * @remarks This takes into account if we're executing a nested-guest and only
1450 * removes the control intercept if both the guest -and- nested-guest
1451 * are not intercepting it.
1452 */
1453static bool hmR0SvmClearCtrlIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1454{
1455 if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
1456 {
1457 bool fRemove = true;
1458#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1459 /* Only remove the control intercept if the nested-guest is also not intercepting it! */
1460 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1461 {
1462 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1463 fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
1464 }
1465#else
1466 RT_NOREF(pVCpu);
1467#endif
1468 if (fRemove)
1469 {
1470 pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
1471 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1472 }
1473 }
1474
1475 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
1476}
1477
1478
1479/**
1480 * Exports the guest (or nested-guest) CR0 into the VMCB.
1481 *
1482 * @param pVCpu The cross context virtual CPU structure.
1483 * @param pVmcb Pointer to the VM control block.
1484 *
1485 * @remarks This assumes we always pre-load the guest FPU.
1486 * @remarks No-long-jump zone!!!
1487 */
1488static void hmR0SvmExportGuestCR0(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1489{
1490 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1491
1492 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1493 uint64_t const uGuestCr0 = pCtx->cr0;
1494 uint64_t uShadowCr0 = uGuestCr0;
1495
1496 /* Always enable caching. */
1497 uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1498
1499 /* When Nested Paging is not available, use shadow page tables and intercept #PFs (the latter is done in SVMR0SetupVM()). */
1500 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1501 {
1502 uShadowCr0 |= X86_CR0_PG /* Use shadow page tables. */
1503 | X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1504 }
1505
1506 /*
1507 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that
1508 * let us isolate the host from it, IEM/REM still needs work to emulate it properly,
1509 * see @bugref{7243#c103}.
1510 */
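 /* Note: forcing CR0.NE makes the CPU report x87 errors via #MF (which we intercept below)
    rather than through the legacy external FERR#/IGNNE# mechanism. */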
1511 if (!(uGuestCr0 & X86_CR0_NE))
1512 {
1513 uShadowCr0 |= X86_CR0_NE;
1514 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_MF);
1515 }
1516 else
1517 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF);
1518
1519 /*
1520 * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
1521 *
1522 * CR0 writes still need interception as PGM requires tracking paging mode changes,
1523 * see @bugref{6944}.
1524 *
1525 * We also don't ever want to honor weird things like cache disable from the guest.
1526 * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0
1527 * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
1528 */
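 /* Per the AMD spec, the selective CR0 write intercept fires only when a MOV to CR0 changes
    bits other than CR0.TS or CR0.MP, so the guest can toggle TS/MP cheaply while PGM is
    still notified of paging related CR0 changes. */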
1529 if (uShadowCr0 == uGuestCr0)
1530 {
1531 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1532 {
1533 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
1534 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
1535 Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
1536 }
1537 else
1538 {
1539 /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
1540 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1541 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
1542 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));
1543 pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx & ~RT_BIT(0))
1544 | (pVmcbNstGstCache->u16InterceptWrCRx & RT_BIT(0));
1545 }
1546 }
1547 else
1548 {
1549 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
1550 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
1551 }
1552 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1553
1554 Assert(!RT_HI_U32(uShadowCr0));
1555 if (pVmcb->guest.u64CR0 != uShadowCr0)
1556 {
1557 pVmcb->guest.u64CR0 = uShadowCr0;
1558 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1559 }
1560}
1561
1562
1563/**
1564 * Exports the guest (or nested-guest) CR3 into the VMCB.
1565 *
1566 * @param pVCpu The cross context virtual CPU structure.
1567 * @param pVmcb Pointer to the VM control block.
1568 *
1569 * @remarks No-long-jump zone!!!
1570 */
1571static void hmR0SvmExportGuestCR3(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1572{
1573 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1574
1575 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1576 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1577 if (pVM->hmr0.s.fNestedPaging)
1578 {
1579 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetHyperCR3(pVCpu);
1580 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1581 pVmcb->guest.u64CR3 = pCtx->cr3;
1582 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1583 }
1584 else
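 /* Without nested paging the VMCB CR3 must point at the shadow page tables maintained by PGM. */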
1585 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1586
1587 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1588}
1589
1590
1591/**
1592 * Exports the guest (or nested-guest) CR4 into the VMCB.
1593 *
 * @returns VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure.
1595 * @param pVmcb Pointer to the VM control block.
1596 *
1597 * @remarks No-long-jump zone!!!
1598 */
1599static int hmR0SvmExportGuestCR4(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1600{
1601 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1602
1603 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1604 uint64_t uShadowCr4 = pCtx->cr4;
1605 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1606 {
1607 switch (pVCpu->hm.s.enmShadowMode)
1608 {
1609 case PGMMODE_REAL:
1610 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1611 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1612
1613 case PGMMODE_32_BIT: /* 32-bit paging. */
1614 uShadowCr4 &= ~X86_CR4_PAE;
1615 break;
1616
1617 case PGMMODE_PAE: /* PAE paging. */
1618 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1619 /* Must use PAE paging as we could use physical memory > 4 GB. */
1620 uShadowCr4 |= X86_CR4_PAE;
1621 break;
1622
1623 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1624 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1625#ifdef VBOX_WITH_64_BITS_GUESTS
1626 break;
1627#else
1628 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1629#endif
1630
1631 default: /* shut up gcc */
1632 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1633 }
1634 }
1635
1636 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1637 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1638 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
1639 {
1640 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
1641 hmR0SvmUpdateVmRunFunction(pVCpu);
1642 }
1643
1644 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
1645 if (uShadowCr4 == pCtx->cr4)
1646 {
1647 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1648 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
1649 else
1650 {
1651 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
1652 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1653 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4))
1654 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4));
1655 }
1656 }
1657 else
1658 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
1659
1660 /* CR4 writes are always intercepted (both guest, nested-guest) for tracking PGM mode changes. */
1661 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
1662
1663 /* Update the VMCB with the shadow CR4 and clear the appropriate VMCB clean bits. */
1664 Assert(!RT_HI_U32(uShadowCr4));
1665 pVmcb->guest.u64CR4 = uShadowCr4;
1666 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
1667
1668 return VINF_SUCCESS;
1669}
1670
1671
1672/**
1673 * Exports the guest (or nested-guest) control registers into the VMCB.
1674 *
1675 * @returns VBox status code.
1676 * @param pVCpu The cross context virtual CPU structure.
1677 * @param pVmcb Pointer to the VM control block.
1678 *
1679 * @remarks No-long-jump zone!!!
1680 */
1681static int hmR0SvmExportGuestControlRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1682{
1683 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1684
1685 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK)
1686 {
1687 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0)
1688 hmR0SvmExportGuestCR0(pVCpu, pVmcb);
1689
1690 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2)
1691 {
1692 pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2;
1693 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1694 }
1695
1696 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3)
1697 hmR0SvmExportGuestCR3(pVCpu, pVmcb);
1698
1699 /* CR4 re-loading is ASSUMED to be done every time we get in from ring-3! (XCR0) */
1700 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4)
1701 {
1702 int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb);
1703 if (RT_FAILURE(rc))
1704 return rc;
1705 }
1706
1707 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK;
1708 }
1709 return VINF_SUCCESS;
1710}
1711
1712
1713/**
1714 * Exports the guest (or nested-guest) segment registers into the VMCB.
1715 *
1717 * @param pVCpu The cross context virtual CPU structure.
1718 * @param pVmcb Pointer to the VM control block.
1719 *
1720 * @remarks No-long-jump zone!!!
1721 */
1722static void hmR0SvmExportGuestSegmentRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1723{
1724 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1725 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1726
1727 /* Guest segment registers. */
1728 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK)
1729 {
1730 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS)
1731 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);
1732
1733 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS)
1734 {
1735 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
1736 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1737 }
1738
1739 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS)
1740 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);
1741
1742 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES)
1743 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);
1744
1745 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS)
1746 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
1747
1748 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS)
1749 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
1750
1751 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1752 }
1753
1754 /* Guest TR. */
1755 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR)
1756 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
1757
1758 /* Guest LDTR. */
1759 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR)
1760 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
1761
1762 /* Guest GDTR. */
1763 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR)
1764 {
1765 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1766 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1767 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1768 }
1769
1770 /* Guest IDTR. */
1771 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR)
1772 {
1773 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1774 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1775 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1776 }
1777
1778 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SREG_MASK
1779 | HM_CHANGED_GUEST_TABLE_MASK);
1780}
1781
1782
1783/**
1784 * Exports the guest (or nested-guest) MSRs into the VMCB.
1785 *
1786 * @param pVCpu The cross context virtual CPU structure.
1787 * @param pVmcb Pointer to the VM control block.
1788 *
1789 * @remarks No-long-jump zone!!!
1790 */
1791static void hmR0SvmExportGuestMsrs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1792{
1793 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1794 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1795
1796 /* Guest Sysenter MSRs. */
1797 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
1798 {
1799 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
1800 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1801
1802 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
1803 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1804
1805 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
1806 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1807 }
1808
1809 /*
1810 * Guest EFER MSR.
1811 * AMD-V requires guest EFER.SVME to be set. Weird.
1812 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1813 */
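 /* Without EFER.SVME set in the VMCB, VMRUN would fail its consistency checks with an invalid-VMCB #VMEXIT. */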
1814 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR)
1815 {
1816 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1817 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1818 }
1819
1820 /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit; otherwise SVM expects AMD64 shadow paging. */
1821 if ( !CPUMIsGuestInLongModeEx(pCtx)
1822 && (pCtx->msrEFER & MSR_K6_EFER_LME))
1823 {
1824 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1825 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1826 }
1827
1828 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS)
1829 {
1830 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1831 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1832 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1833 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1834 }
1835
1836 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE)
1837 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1838
1839 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SYSENTER_MSR_MASK
1840 | HM_CHANGED_GUEST_EFER_MSR
1841 | HM_CHANGED_GUEST_SYSCALL_MSRS
1842 | HM_CHANGED_GUEST_KERNEL_GS_BASE);
1843
1844 /*
1845 * Setup the PAT MSR (applicable for Nested Paging only).
1846 *
1847 * The default value should be MSR_IA32_CR_PAT_INIT_VAL, but we treat all guest memory
1848 * as WB, so choose type 6 for all PAT slots, see @bugref{9634}.
1849 *
1850 * While guests can modify and see the modified values through the shadow values,
1851 * we shall not honor any guest modifications of this MSR to ensure caching is always
1852 * enabled similar to how we clear CR0.CD and NW bits.
1853 *
1854 * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
1855 */
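 /* Each byte below encodes one PAT entry; type 0x06 is write-back (WB). */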
1856 pVmcb->guest.u64PAT = UINT64_C(0x0006060606060606);
1857
1858 /* Enable the last branch record bit if LBR virtualization is enabled. */
1859 if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
1860 pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
1861}
1862
1863
1864/**
1865 * Exports the guest (or nested-guest) debug state into the VMCB and programs
1866 * the necessary intercepts accordingly.
1867 *
1868 * @param pVCpu The cross context virtual CPU structure.
1869 * @param pVmcb Pointer to the VM control block.
1870 *
1871 * @remarks No-long-jump zone!!!
1872 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1873 */
1874static void hmR0SvmExportSharedDebugState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1875{
1876 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1877
1878 /** @todo Figure out stepping with nested-guest. */
1879 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1880 {
1881 /*
1882 * We don't want to always intercept DRx read/writes for nested-guests as it causes
1883 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
1884 * Instead, they are strictly only requested when the nested hypervisor intercepts
1885 * them -- handled while merging VMCB controls.
1886 *
1887 * If neither the outer nor the nested-hypervisor is intercepting DRx read/writes,
1888 * then the nested-guest debug state should be actively loaded on the host so that
1889 * nested-guest reads/writes its own debug registers without causing VM-exits.
1890 */
1891 if ( ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1892 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1893 && !CPUMIsGuestDebugStateActive(pVCpu))
1894 {
1895 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
1896 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1897 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1898 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1899 }
1900
1901 pVmcb->guest.u64DR6 = pCtx->dr[6];
1902 pVmcb->guest.u64DR7 = pCtx->dr[7];
1903 return;
1904 }
1905
1906 /*
1907 * Anyone single stepping on the host side? If so, we'll have to use the
1908 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1909 * the VMM level like the VT-x implementation does.
1910 */
1911 bool fInterceptMovDRx = false;
1912 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1913 if (fStepping)
1914 {
1915 pVCpu->hmr0.s.fClearTrapFlag = true;
1916 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1917 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1918 }
1919
1920 if ( fStepping
1921 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1922 {
1923 /*
1924 * Use the combined guest and host DRx values found in the hypervisor
1925 * register set because the debugger has breakpoints active or someone
1926 * is single stepping on the host side.
1927 *
1928 * Note! DBGF expects a clean DR6 state before executing guest code.
1929 */
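 /* X86_DR6_INIT_VAL is the architectural DR6 reset value (0xffff0ff0), i.e. no debug events pending. */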
1930 if (!CPUMIsHyperDebugStateActive(pVCpu))
1931 {
1932 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1933 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1934 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1935 }
1936
1937 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1938 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1939 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1940 {
1941 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1942 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1943 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1944 }
1945
1946 /** @todo If we cared, we could optimize to allow the guest to read registers
1947 * with the same values. */
1948 fInterceptMovDRx = true;
1949 pVCpu->hmr0.s.fUsingHyperDR7 = true;
1950 Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
1951 }
1952 else
1953 {
1954 /*
1955 * Update DR6, DR7 with the guest values if necessary.
1956 */
1957 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1958 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1959 {
1960 pVmcb->guest.u64DR7 = pCtx->dr[7];
1961 pVmcb->guest.u64DR6 = pCtx->dr[6];
1962 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1963 }
1964 pVCpu->hmr0.s.fUsingHyperDR7 = false;
1965
1966 /*
1967 * If the guest has enabled debug registers, we need to load them prior to
1968 * executing guest code so they'll trigger at the right time.
1969 */
1970 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1971 {
1972 if (!CPUMIsGuestDebugStateActive(pVCpu))
1973 {
1974 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1975 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1976 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1977 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1978 }
1979 Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n"));
1980 }
1981 /*
1982 * If no debugging enabled, we'll lazy load DR0-3. We don't need to
1983 * intercept #DB as DR6 is updated in the VMCB.
1984 *
1985 * Note! If we cared and dared, we could skip intercepting \#DB here.
1986 * However, \#DB shouldn't be performance critical, so we'll play safe
1987 * and keep the code similar to the VT-x code and always intercept it.
1988 */
1989 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1990 fInterceptMovDRx = true;
1991 }
1992
1993 Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
1994 if (fInterceptMovDRx)
1995 {
1996 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1997 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1998 {
1999 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
2000 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
2001 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2002 }
2003 }
2004 else
2005 {
2006 if ( pVmcb->ctrl.u16InterceptRdDRx
2007 || pVmcb->ctrl.u16InterceptWrDRx)
2008 {
2009 pVmcb->ctrl.u16InterceptRdDRx = 0;
2010 pVmcb->ctrl.u16InterceptWrDRx = 0;
2011 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2012 }
2013 }
2014 Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
2015}
2016
2017/**
2018 * Exports the guest (or nested-guest) hardware virtualization state into
2019 * the VMCB.
2020 *
2021 * @param pVCpu The cross context virtual CPU structure.
2022 * @param pVmcb Pointer to the VM control block.
2023 *
2024 * @remarks No-long-jump zone!!!
2025 */
2026static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2027{
2028 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2029
2030 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
2031 {
2032 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
2033 {
2034 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2035 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2036
2037 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); /* Nested VGIF is not supported yet. */
2038 Assert(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF); /* Physical hardware supports VGIF. */
2039 Assert(HMIsSvmVGifActive(pVM)); /* Outer VM has enabled VGIF. */
2040 NOREF(pVM);
2041
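 /* With VGIF the CPU maintains the guest's global interrupt flag in the VMCB, so export CPUM's current view of it. */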
2042 pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
2043 }
2044
2045 /*
2046 * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
2047 * since SVM doesn't have a preemption timer.
2048 *
2049 * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
2050 * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
2051 * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
2052 */
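 /* The RT_MIN() clamping below ensures the nested-guest can never program larger pause-filter values than the outer VM allows. */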
2053 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2054 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2055 uint16_t const uGuestPauseFilterCount = pVM->hm.s.svm.cPauseFilter;
2056 uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
2057 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
2058 {
2059 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2060 pVmcbCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
2061 pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
2062 }
2063 else
2064 {
2065 /** @todo r=ramshankar: We can turn these assignments into assertions. */
2066 pVmcbCtrl->u16PauseFilterCount = uGuestPauseFilterCount;
2067 pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
2068 }
2069 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2070
2071 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
2072 }
2073}
2074
2075
2076/**
2077 * Exports the guest APIC TPR state into the VMCB.
2078 *
2079 * @returns VBox status code.
2080 * @param pVCpu The cross context virtual CPU structure.
2081 * @param pVmcb Pointer to the VM control block.
2082 */
2083static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2084{
2085 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2086
2087 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
2088 {
2089 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2090 if ( PDMHasApic(pVM)
2091 && APICIsEnabled(pVCpu))
2092 {
2093 bool fPendingIntr;
2094 uint8_t u8Tpr;
2095 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
2096 AssertRCReturn(rc, rc);
2097
2098 /* Assume that we need to trap all TPR accesses and thus need not check on
2099 every #VMEXIT if we should update the TPR. */
2100 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
2101 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2102
2103 if (!pVM->hm.s.fTprPatchingActive)
2104 {
2105 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
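 /* For example, a guest TPR of 0xC0 (task-priority class 12) is exported as a VTPR value of 0x0C. */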
2106 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
2107
2108 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we
2109 can deliver the interrupt to the guest. */
2110 if (fPendingIntr)
2111 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
2112 else
2113 {
2114 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
2115 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2116 }
2117
2118 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
2119 }
2120 else
2121 {
2122 /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */
2123 pVmcb->guest.u64LSTAR = u8Tpr;
2124 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2125
2126 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
2127 if (fPendingIntr)
2128 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
2129 else
2130 {
2131 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
2132 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2133 }
2134 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
2135 }
2136 }
2137 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
2138 }
2139 return VINF_SUCCESS;
2140}
2141
2142
2143/**
2144 * Sets up the exception intercepts required for guest execution in the VMCB.
2145 *
2146 * @param pVCpu The cross context virtual CPU structure.
2147 * @param pVmcb Pointer to the VM control block.
2148 *
2149 * @remarks No-long-jump zone!!!
2150 */
2151static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2152{
2153 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2154
2155 /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
2156 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
2157 {
2158 /* Trap #UD for GIM provider (e.g. for hypercalls). */
2159 if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
2160 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
2161 else
2162 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD);
2163
2164 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
2165 if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
2166 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP);
2167 else
2168 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP);
2169
2170 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
2171 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
2172 }
2173}
2174
2175
2176#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2177/**
2178 * Merges guest and nested-guest intercepts for executing the nested-guest using
2179 * hardware-assisted SVM.
2180 *
2181 * This merges the guest and nested-guest intercepts in a way that if the outer
2182 * guest intercept is set we need to intercept it in the nested-guest as
2183 * well.
2184 *
2185 * @param pVCpu The cross context virtual CPU structure.
2187 */
2188static void hmR0SvmMergeVmcbCtrlsNested(PVMCPUCC pVCpu)
2189{
2190 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2191 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
2192 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2193 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2194
2195 /* Merge the guest's CR intercepts into the nested-guest VMCB. */
2196 pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
2197 pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
2198
2199 /* Always intercept CR4 writes for tracking PGM mode changes. */
2200 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
2201
2202 /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
2203 if (!pVM->hmr0.s.fNestedPaging)
2204 {
2205 pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
2206 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
2207 }
2208
2209 /* Merge the guest's DR intercepts into the nested-guest VMCB. */
2210 pVmcbNstGstCtrl->u16InterceptRdDRx |= pVmcb->ctrl.u16InterceptRdDRx;
2211 pVmcbNstGstCtrl->u16InterceptWrDRx |= pVmcb->ctrl.u16InterceptWrDRx;
2212
2213 /*
2214 * Merge the guest's exception intercepts into the nested-guest VMCB.
2215 *
2216 * - #UD: Exclude these as the outer guest's GIM hypercalls are not applicable
2217 * while executing the nested-guest.
2218 *
2219 * - #BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
2220 * be tweaked later depending on how we wish to implement breakpoints.
2221 *
2222 * - #GP: Exclude these as it's the inner VMM's problem to get vmsvga 3d drivers
2223 * loaded into their guests, not ours.
2224 *
2225 * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
2226 * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts().
2227 */
2228#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
2229 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt
2230 & ~( RT_BIT(X86_XCPT_UD)
2231 | RT_BIT(X86_XCPT_BP)
2232 | (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv ? RT_BIT(X86_XCPT_GP) : 0));
2233#else
2234 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
2235#endif
2236
2237 /*
2238 * Adjust intercepts while executing the nested-guest that differ from the
2239 * outer guest intercepts.
2240 *
2241 * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
2242 * that belong to the nested-guest to the outer guest.
2243 *
2244 * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
2245 * the nested-guest, the physical CPU raises a \#UD exception as expected.
2246 */
2247 pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~( SVM_CTRL_INTERCEPT_VINTR
2248 | SVM_CTRL_INTERCEPT_VMMCALL))
2249 | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
2250
2251 Assert( (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
2252 == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
2253
2254 /* Finally, update the VMCB clean bits. */
2255 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2256}
2257#endif
2258
2259
2260/**
2261 * Enters the AMD-V session.
2262 *
2263 * @returns VBox status code.
2264 * @param pVCpu The cross context virtual CPU structure.
2265 */
2266VMMR0DECL(int) SVMR0Enter(PVMCPUCC pVCpu)
2267{
2268 AssertPtr(pVCpu);
2269 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
2270 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2271
2272 LogFlowFunc(("pVCpu=%p\n", pVCpu));
2273 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2274 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2275
2276 pVCpu->hmr0.s.fLeaveDone = false;
2277 return VINF_SUCCESS;
2278}
2279
2280
2281/**
2282 * Thread-context callback for AMD-V.
2283 *
2284 * This is used together with RTThreadCtxHookCreate() on platforms which
2285 * support it, and directly from VMMR0EmtPrepareForBlocking() and
2286 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
2287 *
2288 * @param enmEvent The thread-context event.
2289 * @param pVCpu The cross context virtual CPU structure.
2290 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
2291 * @thread EMT(pVCpu)
2292 */
2293VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
2294{
2295 NOREF(fGlobalInit);
2296
2297 switch (enmEvent)
2298 {
2299 case RTTHREADCTXEVENT_OUT:
2300 {
2301 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2302 VMCPU_ASSERT_EMT(pVCpu);
2303
2304 /* No longjmps (log-flush, locks) in this fragile context. */
2305 VMMRZCallRing3Disable(pVCpu);
2306
2307 if (!pVCpu->hmr0.s.fLeaveDone)
2308 {
2309 hmR0SvmLeave(pVCpu, false /* fImportState */);
2310 pVCpu->hmr0.s.fLeaveDone = true;
2311 }
2312
2313 /* Leave HM context, takes care of local init (term). */
2314 int rc = HMR0LeaveCpu(pVCpu);
2315 AssertRC(rc); NOREF(rc);
2316
2317 /* Restore longjmp state. */
2318 VMMRZCallRing3Enable(pVCpu);
2319 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
2320 break;
2321 }
2322
2323 case RTTHREADCTXEVENT_IN:
2324 {
2325 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2326 VMCPU_ASSERT_EMT(pVCpu);
2327
2328 /* No longjmps (log-flush, locks) in this fragile context. */
2329 VMMRZCallRing3Disable(pVCpu);
2330
2331 /*
2332 * Initialize the bare minimum state required for HM. This takes care of
2333 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
2334 */
2335 int rc = hmR0EnterCpu(pVCpu);
2336 AssertRC(rc); NOREF(rc);
2337 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2338 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2339
2340 pVCpu->hmr0.s.fLeaveDone = false;
2341
2342 /* Restore longjmp state. */
2343 VMMRZCallRing3Enable(pVCpu);
2344 break;
2345 }
2346
2347 default:
2348 break;
2349 }
2350}
2351
2352
2353/**
2354 * Saves the host state.
2355 *
2356 * @returns VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure.
2358 *
2359 * @remarks No-long-jump zone!!!
2360 */
2361VMMR0DECL(int) SVMR0ExportHostState(PVMCPUCC pVCpu)
2362{
2363 NOREF(pVCpu);
2364
2365 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
2366 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT);
2367 return VINF_SUCCESS;
2368}
2369
2370
2371/**
2372 * Exports the guest or nested-guest state from the virtual-CPU context into the
2373 * VMCB.
2374 *
2375 * Also sets up the appropriate VMRUN function to execute guest or nested-guest
2376 * code based on the virtual-CPU mode.
2377 *
2378 * @returns VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure.
2380 * @param pSvmTransient Pointer to the SVM-transient structure.
2381 *
2382 * @remarks No-long-jump zone!!!
2383 */
2384static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
2385{
2386 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
2387
2388 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2389 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2390 Assert(pVmcb);
2391
2392 pVmcb->guest.u64RIP = pCtx->rip;
2393 pVmcb->guest.u64RSP = pCtx->rsp;
2394 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
2395 pVmcb->guest.u64RAX = pCtx->rax;
2396
2397 bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
2398 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2399
2400 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
2401 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
2402 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
2403 hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
2404 hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
2405
2406 ASMSetFlags(fEFlags);
2407
2408 if (!fIsNestedGuest)
2409 {
2410 /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as
2411 otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
2412 hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
2413 hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
2414 }
2415
2416 /* Clear any bits that may be set but are exported unconditionally, as well as unused/reserved bits. */
2417 uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
2418 | HM_CHANGED_GUEST_RFLAGS
2419 | HM_CHANGED_GUEST_GPRS_MASK
2420 | HM_CHANGED_GUEST_X87
2421 | HM_CHANGED_GUEST_SSE_AVX
2422 | HM_CHANGED_GUEST_OTHER_XSAVE
2423 | HM_CHANGED_GUEST_XCRx
2424 | HM_CHANGED_GUEST_TSC_AUX
2425 | HM_CHANGED_GUEST_OTHER_MSRS;
2426 if (fIsNestedGuest)
2427 fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
2428 | HM_CHANGED_GUEST_APIC_TPR;
2429
2430 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( fUnusedMask
2431 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
2432
2433#ifdef VBOX_STRICT
2434 /*
2435 * All of the guest-CPU state and SVM keeper bits should be exported here by now,
2436 * except for the host-context and/or shared host-guest context bits.
2437 */
2438 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
2439 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
2440 ("fCtxChanged=%#RX64\n", fCtxChanged));
2441
2442 /*
2443 * If we need to log state that isn't always imported, we'll need to import it here.
2444 * See hmR0SvmPostRunGuest() for which parts of the state are imported unconditionally.
2445 */
2446 hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */);
2447#endif
2448
2449 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
2450 return VINF_SUCCESS;
2451}
2452
2453#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2454
2455/**
2456 * Merges the guest and nested-guest MSR permission bitmap.
2457 *
2458 * If the guest is intercepting an MSR we need to intercept it regardless of
2459 * whether the nested-guest is intercepting it or not.
2460 *
2461 * @param pHostCpu The HM physical-CPU structure.
2462 * @param pVCpu The cross context virtual CPU structure.
2463 *
2464 * @remarks No-long-jmp zone!!!
2465 */
2466DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
2467{
2468 uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2469 uint64_t const *pu64NstGstMsrpm = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[0];
2470 uint64_t *pu64DstMsrpm = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
2471
2472 /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
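 /* Each MSR takes two permission bits (one for reads, one for writes), so every qword OR'ed below covers 32 MSRs. */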
2473 uint32_t const offRsvdQwords = 0x1800 >> 3;
2474 for (uint32_t i = 0; i < offRsvdQwords; i++)
2475 pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
2476}
2477
2478
2479/**
2480 * Caches the nested-guest VMCB fields before we modify them for execution using
2481 * hardware-assisted SVM.
2482 *
2483 * @returns true if the VMCB was previously already cached, false otherwise.
2484 * @param pVCpu The cross context virtual CPU structure.
2485 *
2486 * @sa HMNotifySvmNstGstVmexit.
2487 */
2488static bool hmR0SvmCacheVmcbNested(PVMCPUCC pVCpu)
2489{
2490 /*
2491 * Cache the nested-guest programmed VMCB fields if we have not cached them yet.
2492 * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
2493 *
2494 * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
2495 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
2496 */
2497 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2498 bool const fWasCached = pVmcbNstGstCache->fCacheValid;
2499 if (!fWasCached)
2500 {
2501 PCSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2502 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2503 pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
2504 pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
2505 pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
2506 pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
2507 pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
2508 pVmcbNstGstCache->u16PauseFilterCount = pVmcbNstGstCtrl->u16PauseFilterCount;
2509 pVmcbNstGstCache->u32InterceptXcpt = pVmcbNstGstCtrl->u32InterceptXcpt;
2510 pVmcbNstGstCache->u64InterceptCtrl = pVmcbNstGstCtrl->u64InterceptCtrl;
2511 pVmcbNstGstCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
2512 pVmcbNstGstCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
2513 pVmcbNstGstCache->fNestedPaging = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
2514 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
2515 pVmcbNstGstCache->fCacheValid = true;
2516 Log4Func(("Cached VMCB fields\n"));
2517 }
2518
2519 return fWasCached;
2520}
2521
2522
2523/**
2524 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
2525 *
2526 * This is done the first time we enter nested-guest execution using SVM R0
2527 * until the nested-guest \#VMEXIT (not to be confused with physical CPU
2528 * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
2529 *
2530 * @param pVCpu The cross context virtual CPU structure.
2531 */
2532static void hmR0SvmSetupVmcbNested(PVMCPUCC pVCpu)
2533{
2534 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2535 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2536
2537 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2538
2539 /*
2540 * First cache the nested-guest VMCB fields we may potentially modify.
2541 */
2542 bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
2543 if (!fVmcbCached)
2544 {
2545 /*
2546 * The IOPM of the nested-guest can be ignored because the guest always
2547 * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
2548 * than the nested-guest IOPM and swap the field back on the #VMEXIT.
2549 */
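 /* g_HCPhysIOBitmap addresses the outer guest's IO permission bitmap, which intercepts all ports. */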
2550 pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
2551
2552 /*
2553 * Use the same nested-paging as the outer guest. We can't dynamically switch off
2554 * nested-paging suddenly while executing a VM (see assertion at the end of
2555 * Trap0eHandler() in PGMAllBth.h).
2556 */
2557 pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
2558
2559 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
2560 pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;
2561
2562 /*
2563 * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject
2564 * to the nested-guest intercepts and we always run with V_INTR_MASKING.
2565 */
2566 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2567
2568# ifdef DEBUG_ramshankar
2569 /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
2570 pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
2571# endif
2572
2573 /*
2574 * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
2575 * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
2576 */
2577 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
2578 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
2579 | SVM_CTRL_INTERCEPT_VMLOAD;
2580
2581 /*
2582 * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
2583 * CLGI/STGI instructions executed by the nested-guest.
2584 */
2585 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
2586 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
2587 | SVM_CTRL_INTERCEPT_STGI;
2588
2589 /* Merge the guest and nested-guest intercepts. */
2590 hmR0SvmMergeVmcbCtrlsNested(pVCpu);
2591
2592 /* Update the VMCB clean bits. */
2593 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2594 }
2595 else
2596 {
2597 Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
2598 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
2599 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2600 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPagingCfg == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2601 }
2602}
2603
2604#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
2605
2606/**
2607 * Exports the state shared between the host and guest (or nested-guest) into
2608 * the VMCB.
2609 *
2610 * @param pVCpu The cross context virtual CPU structure.
2611 * @param pVmcb Pointer to the VM control block.
2612 *
2613 * @remarks No-long-jump zone!!!
2614 */
2615static void hmR0SvmExportSharedState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2616{
2617 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2618 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2619
2620 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
2621 hmR0SvmExportSharedDebugState(pVCpu, pVmcb);
2622
2623 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
2624 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE),
2625 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
2626}
2627
2628
2629/**
2630 * Worker for SVMR0ImportStateOnDemand.
2631 *
2632 * @param pVCpu The cross context virtual CPU structure.
2633 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2634 */
2635static void hmR0SvmImportGuestState(PVMCPUCC pVCpu, uint64_t fWhat)
2636{
2637 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
2638
2639 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2640 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2641 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
2642 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2643
2644 /*
2645 * We disable interrupts to make the updating of the state and in particular
2646 * the fExtrn modification atomic with respect to preemption hooks.
2647 */
2648 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2649
2650 fWhat &= pCtx->fExtrn;
2651 if (fWhat)
2652 {
2653#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2654 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
2655 {
2656 if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
2657 {
2658 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); /* We don't yet support passing VGIF feature to the guest. */
2659 Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM))); /* VM has configured it. */
2660 CPUMSetGuestGif(pCtx, pVmcbCtrl->IntCtrl.n.u1VGif);
2661 }
2662 }
2663
2664 if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
2665 {
2666 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending
2667 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
2668 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2669 }
2670#endif
2671
2672 if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
2673 {
2674 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
2675 EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
2676 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2677 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2678 }
2679
2680 if (fWhat & CPUMCTX_EXTRN_RIP)
2681 pCtx->rip = pVmcbGuest->u64RIP;
2682
2683 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2684 pCtx->eflags.u32 = pVmcbGuest->u64RFlags;
2685
2686 if (fWhat & CPUMCTX_EXTRN_RSP)
2687 pCtx->rsp = pVmcbGuest->u64RSP;
2688
2689 if (fWhat & CPUMCTX_EXTRN_RAX)
2690 pCtx->rax = pVmcbGuest->u64RAX;
2691
2692 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2693 {
2694 if (fWhat & CPUMCTX_EXTRN_CS)
2695 {
2696 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
2697 /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */
2698 /** @todo SELM might need to be fixed as it too should not care about the
2699 * granularity bit. See @bugref{6785}. */
2700 if ( !pCtx->cs.Attr.n.u1Granularity
2701 && pCtx->cs.Attr.n.u1Present
2702 && pCtx->cs.u32Limit > UINT32_C(0xfffff))
2703 {
2704 Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
2705 pCtx->cs.Attr.n.u1Granularity = 1;
2706 }
2707 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
2708 }
2709 if (fWhat & CPUMCTX_EXTRN_SS)
2710 {
2711 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
2712 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
2713 /*
2714 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the
2715 * VMCB and use that, so it's possible that the SS DPL isn't updated by
2716 * AMD-V when the CPL changes during guest execution. Observed on some
2717 * AMD Fusion CPUs with 64-bit guests.
2718 *
2719 * See AMD spec. 15.5.1 "Basic operation".
2720 */
2721 Assert(!(pVmcbGuest->u8CPL & ~0x3));
2722 uint8_t const uCpl = pVmcbGuest->u8CPL;
2723 if (pCtx->ss.Attr.n.u2Dpl != uCpl)
2724 pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
2725 }
2726 if (fWhat & CPUMCTX_EXTRN_DS)
2727 {
2728 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
2729 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
2730 }
2731 if (fWhat & CPUMCTX_EXTRN_ES)
2732 {
2733 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
2734 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
2735 }
2736 if (fWhat & CPUMCTX_EXTRN_FS)
2737 {
2738 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
2739 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
2740 }
2741 if (fWhat & CPUMCTX_EXTRN_GS)
2742 {
2743 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
2744 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
2745 }
2746 }
2747
2748 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2749 {
2750 if (fWhat & CPUMCTX_EXTRN_TR)
2751 {
2752 /*
2753 * Fixup TR attributes so they're compatible with Intel. Important when saved-states
2754 * are used between Intel and AMD, see @bugref{6208#c39}.
2755 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
2756 */
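 /* VT-x only accepts a busy TSS type in the guest TR, hence the conversion from 'available' to 'busy' below. */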
2757 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
2758 if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2759 {
2760 if ( pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2761 || CPUMIsGuestInLongModeEx(pCtx))
2762 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2763 else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
2764 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2765 }
2766 }
2767
2768 if (fWhat & CPUMCTX_EXTRN_LDTR)
2769 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
2770
2771 if (fWhat & CPUMCTX_EXTRN_GDTR)
2772 {
2773 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
2774 pCtx->gdtr.pGdt = pVmcbGuest->GDTR.u64Base;
2775 }
2776
2777 if (fWhat & CPUMCTX_EXTRN_IDTR)
2778 {
2779 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
2780 pCtx->idtr.pIdt = pVmcbGuest->IDTR.u64Base;
2781 }
2782 }
2783
2784 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2785 {
2786 pCtx->msrSTAR = pVmcbGuest->u64STAR;
2787 pCtx->msrLSTAR = pVmcbGuest->u64LSTAR;
2788 pCtx->msrCSTAR = pVmcbGuest->u64CSTAR;
2789 pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
2790 }
2791
2792 if ( (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2793 && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
2794 {
2795 pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS;
2796 pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
2797 pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
2798 }
2799
2800 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2801 pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
2802
2803 if (fWhat & CPUMCTX_EXTRN_DR_MASK)
2804 {
2805 if (fWhat & CPUMCTX_EXTRN_DR6)
2806 {
2807 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2808 pCtx->dr[6] = pVmcbGuest->u64DR6;
2809 else
2810 CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
2811 }
2812
2813 if (fWhat & CPUMCTX_EXTRN_DR7)
2814 {
2815 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2816 pCtx->dr[7] = pVmcbGuest->u64DR7;
2817 else
2818 Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
2819 }
2820 }
2821
2822 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2823 {
2824 if (fWhat & CPUMCTX_EXTRN_CR0)
2825 {
2826 /* We intercept changes to all CR0 bits except maybe TS & MP bits. */
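 /* I.e. keep TS and MP as the CPU wrote them back into the VMCB (the guest may flip these
    without a #VMEXIT) and take all other bits from the CPUM copy. */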
2827 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
2828 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP));
2829 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
2830 CPUMSetGuestCR0(pVCpu, uCr0);
2831 VMMRZCallRing3Enable(pVCpu);
2832 }
2833
2834 if (fWhat & CPUMCTX_EXTRN_CR2)
2835 pCtx->cr2 = pVmcbGuest->u64CR2;
2836
2837 if (fWhat & CPUMCTX_EXTRN_CR3)
2838 {
2839 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
2840 && pCtx->cr3 != pVmcbGuest->u64CR3)
2841 {
2842 CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
2843 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2844 }
2845 }
2846
2847 /* Changes to CR4 are always intercepted. */
2848 }
2849
2850 /* Update fExtrn. */
2851 pCtx->fExtrn &= ~fWhat;
2852
2853 /* If everything has been imported, clear the HM keeper bit. */
2854 if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
2855 {
2856 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
2857 Assert(!pCtx->fExtrn);
2858 }
2859 }
2860 else
2861 Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
2862
2863 ASMSetFlags(fEFlags);
2864
2865 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
2866
2867 /*
2868 * Honor any pending CR3 updates.
2869 *
2870 * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
2871 * -> SVMR0CallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
2872 * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
2873 * handling -> hmR0SvmImportGuestState() and here we are.
2874 *
2875 * The reason for such complicated handling is that VM-exits which call into PGM expect
2876 * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've
2877 * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0
2878 * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3().
2879 *
2880 * Since the longjmp exit path can't check these CR3 force-flags or call code that takes a lock
2881 * again, and doesn't process force-flags like regular exits to ring-3 either, we cover for it here.
2882 */
2883 if ( VMMRZCallRing3IsEnabled(pVCpu)
2884 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
2885 {
2886 AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
2887 PGMUpdateCR3(pVCpu, pCtx->cr3);
2888 }
2889}
2890
2891
2892/**
2893 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
2894 * context.
2895 *
2896 * Currently there is no residual state left in the CPU that is not updated in the
2897 * VMCB.
2898 *
2899 * @returns VBox status code.
2900 * @param pVCpu The cross context virtual CPU structure.
2901 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2902 */
2903VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2904{
2905 hmR0SvmImportGuestState(pVCpu, fWhat);
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Gets SVM \#VMEXIT auxiliary information.
2912 *
2913 * @returns VBox status code.
2914 * @param pVCpu The cross context virtual CPU structure.
2915 * @param pSvmExitAux Where to store the auxiliary info.
2916 */
2917VMMR0DECL(int) SVMR0GetExitAuxInfo(PVMCPUCC pVCpu, PSVMEXITAUX pSvmExitAux)
2918{
2919 PCSVMTRANSIENT pSvmTransient = pVCpu->hmr0.s.svm.pSvmTransient;
2920 if (RT_LIKELY(pSvmTransient))
2921 {
2922 PCSVMVMCB pVmcb = pSvmTransient->pVmcb;
2923 if (RT_LIKELY(pVmcb))
2924 {
2925 pSvmExitAux->u64ExitCode = pVmcb->ctrl.u64ExitCode;
2926 pSvmExitAux->u64ExitInfo1 = pVmcb->ctrl.u64ExitInfo1;
2927 pSvmExitAux->u64ExitInfo2 = pVmcb->ctrl.u64ExitInfo2;
2928 pSvmExitAux->ExitIntInfo = pVmcb->ctrl.ExitIntInfo;
2929 return VINF_SUCCESS;
2930 }
2931 return VERR_SVM_IPE_5;
2932 }
2933 return VERR_NOT_AVAILABLE;
2934}
2935
2936
2937/**
2938 * Does the necessary state syncing before returning to ring-3 for any reason
2939 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2940 *
2941 * @param pVCpu The cross context virtual CPU structure.
2942 * @param fImportState Whether to import the guest state from the VMCB back
2943 * to the guest-CPU context.
2944 *
2945 * @remarks No-long-jmp zone!!!
2946 */
2947static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState)
2948{
2949 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2950 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2951
2952 /*
2953 * !!! IMPORTANT !!!
2954 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
2955 */
2956
2957 /* Save the guest state if necessary. */
2958 if (fImportState)
2959 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
2960
2961 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2962 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
2963 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2964
2965 /*
2966 * Restore host debug registers if necessary and resync on next R0 reentry.
2967 */
2968#ifdef VBOX_STRICT
2969 if (CPUMIsHyperDebugStateActive(pVCpu))
2970 {
2971 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb; /** @todo nested-guest. */
2972 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2973 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2974 }
2975#endif
2976 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2977 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2978 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2979
2980 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
2981 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
2982 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
2983 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
2984 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
2985 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
2986 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2987
2988 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
2989}
2990
2991
2992/**
2993 * Leaves the AMD-V session.
2994 *
2995 * Only used while returning to ring-3 either due to longjump or exits to
2996 * ring-3.
2997 *
2998 * @returns VBox status code.
2999 * @param pVCpu The cross context virtual CPU structure.
3000 */
3001static int hmR0SvmLeaveSession(PVMCPUCC pVCpu)
3002{
3003 HM_DISABLE_PREEMPT(pVCpu);
3004 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3005 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3006
3007 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
3008 and done this from the SVMR0ThreadCtxCallback(). */
3009 if (!pVCpu->hmr0.s.fLeaveDone)
3010 {
3011 hmR0SvmLeave(pVCpu, true /* fImportState */);
3012 pVCpu->hmr0.s.fLeaveDone = true;
3013 }
3014
3015 /*
3016 * !!! IMPORTANT !!!
3017 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
3018 */
3019
3020 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3021 /* Deregister hook now that we've left HM context before re-enabling preemption. */
3022 VMMR0ThreadCtxHookDisable(pVCpu);
3023
3024 /* Leave HM context. This takes care of local init (term). */
3025 int rc = HMR0LeaveCpu(pVCpu);
3026
3027 HM_RESTORE_PREEMPT();
3028 return rc;
3029}
3030
3031
3032/**
3033 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
3034 * any remaining host state) before we go back to ring-3 due to an assertion.
3035 *
 * @returns VBox status code.
3036 * @param pVCpu The cross context virtual CPU structure.
3037 */
3038VMMR0DECL(int) SVMR0AssertionCallback(PVMCPUCC pVCpu)
3039{
3040 /*
3041 * !!! IMPORTANT !!!
3042 * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
3043 * to be updated too. This is a stripped-down version which gets out ASAP, trying not to trigger any assertions.
3044 */
3045 VMMR0AssertionRemoveNotification(pVCpu);
3046 VMMRZCallRing3Disable(pVCpu);
3047 HM_DISABLE_PREEMPT(pVCpu);
3048
3049 /* Import the entire guest state. */
3050 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3051
3052 /* Restore host FPU state if necessary and resync on next R0 reentry. */
3053 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
3054
3055 /* Restore host debug registers if necessary and resync on next R0 reentry. */
3056 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
3057
3058 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
3059 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3060 VMMR0ThreadCtxHookDisable(pVCpu);
3061
3062 /* Leave HM context. This takes care of local init (term). */
3063 HMR0LeaveCpu(pVCpu);
3064
3065 HM_RESTORE_PREEMPT();
3066 return VINF_SUCCESS;
3067}
3068
3069
3070/**
3071 * Takes the necessary actions before going back to ring-3.
3072 *
3073 * An action requires us to go back to ring-3. This function performs the necessary
3074 * steps before we can safely return to ring-3. This is a voluntary exit, not the
3075 * same as a longjmp to ring-3.
3076 *
3077 * @returns Strict VBox status code.
3078 * @param pVCpu The cross context virtual CPU structure.
3079 * @param rcExit The reason for exiting to ring-3. Can be
3080 * VINF_VMM_UNKNOWN_RING3_CALL.
3081 */
3082static VBOXSTRICTRC hmR0SvmExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
3083{
3084 Assert(pVCpu);
3085 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3086
3087 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
3088 VMMRZCallRing3Disable(pVCpu);
3089 Log4Func(("rcExit=%d LocalFF=%#RX64 GlobalFF=%#RX32\n", VBOXSTRICTRC_VAL(rcExit), (uint64_t)pVCpu->fLocalForcedActions,
3090 pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
3091
3092 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
3093 if (pVCpu->hm.s.Event.fPending)
3094 {
3095 hmR0SvmPendingEventToTrpmTrap(pVCpu);
3096 Assert(!pVCpu->hm.s.Event.fPending);
3097 }
3098
3099 /* Sync. the necessary state for going back to ring-3. */
3100 hmR0SvmLeaveSession(pVCpu);
3101 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3102
3103 /* Thread-context hooks are unregistered at this point!!! */
3104 /* Ring-3 callback notifications are unregistered at this point!!! */
3105
3106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
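    /* Mark descriptor-table, TR/LDTR, hidden selector register and SYSENTER MSR state as changed
       so that ring-3 knows to resync it. */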
3107 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
3108 | CPUM_CHANGED_LDTR
3109 | CPUM_CHANGED_GDTR
3110 | CPUM_CHANGED_IDTR
3111 | CPUM_CHANGED_TR
3112 | CPUM_CHANGED_HIDDEN_SEL_REGS);
3113 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
3114 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
3115 {
3116 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3117 }
3118
3119    /* Update the exit-to-ring-3 reason. */
3120 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
3121
3122 /* On our way back from ring-3, reload the guest-CPU state if it may change while in ring-3. */
3123 if ( rcExit != VINF_EM_RAW_INTERRUPT
3124 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3125 {
3126 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
3127 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3128 }
3129
3130 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
3131 VMMRZCallRing3Enable(pVCpu);
3132
3133 /*
3134 * If we're emulating an instruction, we shouldn't have any TRPM traps pending
3135 * and if we're injecting an event we should have a TRPM trap pending.
3136 */
3137 AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
3138 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3139 VERR_SVM_IPE_5);
3140 AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
3141 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3142 VERR_SVM_IPE_4);
3143
3144 return rcExit;
3145}
3146
3147
3148/**
3149 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
3150 * intercepts.
3151 *
3152 * @param pVCpu The cross context virtual CPU structure.
3153 * @param pVmcb Pointer to the VM control block.
3154 *
3155 * @remarks No-long-jump zone!!!
3156 */
3157static void hmR0SvmUpdateTscOffsetting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3158{
3159     * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable
3160     * and, in the case of a nested-guest, if the nested-VMCB specifies it is not
3161     * intercepting RDTSC/RDTSCP either.
3162 * RDTSC/RDTSCP as well.
3163 */
3164 bool fParavirtTsc;
3165 uint64_t uTscOffset;
3166 bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
3167
3168 bool fIntercept;
3169 if (fCanUseRealTsc)
3170 fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3171 else
3172 {
3173 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3174 fIntercept = true;
3175 }
3176
3177 if (!fIntercept)
3178 {
3179#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3180 /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
3181 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3182 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
3183#endif
3184
3185 /* Update the TSC offset in the VMCB and the relevant clean bits. */
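        /* While RDTSC/RDTSCP are not intercepted, the guest observes (host TSC + u64TSCOffset). */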
3186 pVmcb->ctrl.u64TSCOffset = uTscOffset;
3187 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
3188 }
3189
3190    /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
3191       information before every VM-entry, hence we have nothing to do here at the moment. */
3192 if (fParavirtTsc)
3193 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
3194}
3195
3196
3197/**
3198 * Sets an event as a pending event to be injected into the guest.
3199 *
3200 * @param pVCpu The cross context virtual CPU structure.
3201 * @param pEvent Pointer to the SVM event.
3202 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3203 * page-fault.
3204 *
3205 * @remarks Statistics counter assumes this is a guest event being reflected to
3206 *          the guest, i.e. 'StatInjectPendingReflect' is always incremented.
3207 */
3208DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPUCC pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
3209{
3210 Assert(!pVCpu->hm.s.Event.fPending);
3211 Assert(pEvent->n.u1Valid);
3212
3213 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
3214 pVCpu->hm.s.Event.fPending = true;
3215 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
3216
3217 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3218 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3219}
3220
3221
3222/**
3223 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3224 *
3225 * @param pVCpu The cross context virtual CPU structure.
3226 */
3227DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPUCC pVCpu)
3228{
3229 SVMEVENT Event;
3230 Event.u = 0;
3231 Event.n.u1Valid = 1;
3232 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3233 Event.n.u8Vector = X86_XCPT_UD;
3234 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3235}
3236
3237
3238/**
3239 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3240 *
3241 * @param pVCpu The cross context virtual CPU structure.
3242 */
3243DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPUCC pVCpu)
3244{
3245 SVMEVENT Event;
3246 Event.u = 0;
3247 Event.n.u1Valid = 1;
3248 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3249 Event.n.u8Vector = X86_XCPT_DB;
3250 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3251}
3252
3253
3254/**
3255 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
3256 *
3257 * @param pVCpu The cross context virtual CPU structure.
3258 * @param u32ErrCode The error-code for the page-fault.
3259 * @param uFaultAddress The page fault address (CR2).
3260 *
3261 * @remarks This updates the guest CR2 with @a uFaultAddress!
3262 */
3263DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3264{
3265 SVMEVENT Event;
3266 Event.u = 0;
3267 Event.n.u1Valid = 1;
3268 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3269 Event.n.u8Vector = X86_XCPT_PF;
3270 Event.n.u1ErrorCodeValid = 1;
3271 Event.n.u32ErrorCode = u32ErrCode;
3272
3273 /* Update CR2 of the guest. */
3274 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
3275 if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
3276 {
3277 pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
3278 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
3279 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
3280 }
3281
3282 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3283}
3284
3285
3286/**
3287 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
3288 *
3289 * @param pVCpu The cross context virtual CPU structure.
3290 */
3291DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPUCC pVCpu)
3292{
3293 SVMEVENT Event;
3294 Event.u = 0;
3295 Event.n.u1Valid = 1;
3296 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3297 Event.n.u8Vector = X86_XCPT_MF;
3298 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3299}
3300
3301
3302/**
3303 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
3304 *
3305 * @param pVCpu The cross context virtual CPU structure.
3306 */
3307DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPUCC pVCpu)
3308{
3309 SVMEVENT Event;
3310 Event.u = 0;
3311 Event.n.u1Valid = 1;
3312 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3313 Event.n.u8Vector = X86_XCPT_DF;
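    /* #DF always delivers an error code of zero. */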
3314 Event.n.u1ErrorCodeValid = 1;
3315 Event.n.u32ErrorCode = 0;
3316 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3317}
3318
3319
3320/**
3321 * Injects an event into the guest upon VMRUN by updating the relevant field
3322 * in the VMCB.
3323 *
3324 * @param pVCpu The cross context virtual CPU structure.
3325 * @param pVmcb Pointer to the guest VM control block.
3326 * @param pEvent Pointer to the event.
3327 *
3328 * @remarks No-long-jump zone!!!
3329 * @remarks Requires CR0!
3330 */
3331DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
3332{
3333 Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
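    /* EVENTINJ shares the SVMEVENT layout, so the event can be copied into the VMCB wholesale
       (AMD spec. 15.20 "Event Injection"). */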
3334 pVmcb->ctrl.EventInject.u = pEvent->u;
3335 if ( pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_EXCEPTION
3336 || pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_NMI)
3337 {
3338 Assert(pEvent->n.u8Vector <= X86_XCPT_LAST);
3339 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[pEvent->n.u8Vector]);
3340 }
3341 else
3342 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
3343 RT_NOREF(pVCpu);
3344
3345 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3346 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3347}
3348
3349
3350
3351/**
3352 * Converts any TRPM trap into a pending HM event. This is typically used when
3353 * entering from ring-3 (not longjmp returns).
3354 *
3355 * @param pVCpu The cross context virtual CPU structure.
3356 */
3357static void hmR0SvmTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3358{
3359 Assert(TRPMHasTrap(pVCpu));
3360 Assert(!pVCpu->hm.s.Event.fPending);
3361
3362 uint8_t uVector;
3363 TRPMEVENT enmTrpmEvent;
3364 uint32_t uErrCode;
3365 RTGCUINTPTR GCPtrFaultAddress;
3366 uint8_t cbInstr;
3367
3368 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, NULL /* pfIcebp */);
3369 AssertRC(rc);
3370
3371 SVMEVENT Event;
3372 Event.u = 0;
3373 Event.n.u1Valid = 1;
3374 Event.n.u8Vector = uVector;
3375
3376 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
3377 if (enmTrpmEvent == TRPM_TRAP)
3378 {
3379 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3380 switch (uVector)
3381 {
3382 case X86_XCPT_NMI:
3383 {
3384 Event.n.u3Type = SVM_EVENT_NMI;
3385 break;
3386 }
3387
3388 case X86_XCPT_BP:
3389 case X86_XCPT_OF:
3390 AssertMsgFailed(("Invalid TRPM vector %d for event type %d\n", uVector, enmTrpmEvent));
3391 RT_FALL_THRU();
3392
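            /* These vectors deliver an error code; forward it in the event. */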
3393 case X86_XCPT_PF:
3394 case X86_XCPT_DF:
3395 case X86_XCPT_TS:
3396 case X86_XCPT_NP:
3397 case X86_XCPT_SS:
3398 case X86_XCPT_GP:
3399 case X86_XCPT_AC:
3400 {
3401 Event.n.u1ErrorCodeValid = 1;
3402 Event.n.u32ErrorCode = uErrCode;
3403 break;
3404 }
3405 }
3406 }
3407 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
3408 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3409 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
3410 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
3411 else
3412 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
3413
3414 rc = TRPMResetTrap(pVCpu);
3415 AssertRC(rc);
3416
3417 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
3418 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
3419
3420 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
3421}
3422
3423
3424/**
3425 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
3426 * AMD-V to execute any instruction.
3427 *
3428 * @param pVCpu The cross context virtual CPU structure.
3429 */
3430static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu)
3431{
3432 Assert(pVCpu->hm.s.Event.fPending);
3433 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
3434
3435 SVMEVENT Event;
3436 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3437
3438 uint8_t uVector = Event.n.u8Vector;
3439 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event, uVector);
3440
3441 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, Event.n.u3Type));
3442
3443 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
3444 AssertRC(rc);
3445
3446 if (Event.n.u1ErrorCodeValid)
3447 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
3448
3449 if ( enmTrapType == TRPM_TRAP
3450 && uVector == X86_XCPT_PF)
3451 {
3452 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
3453 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
3454 }
3455 else if (enmTrapType == TRPM_SOFTWARE_INT)
3456 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
3457 pVCpu->hm.s.Event.fPending = false;
3458}
3459
3460
3461/**
3462 * Checks if the guest (or nested-guest) has an interrupt shadow active right
3463 * now.
3464 *
3465 * @returns @c true if the interrupt shadow is active, @c false otherwise.
3466 * @param pVCpu The cross context virtual CPU structure.
3467 *
3468 * @remarks No-long-jump zone!!!
3469 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
3470 */
3471static bool hmR0SvmIsIntrShadowActive(PVMCPUCC pVCpu)
3472{
3473 /*
3474 * Instructions like STI and MOV SS inhibit interrupts till the next instruction
3475 * completes. Check if we should inhibit interrupts or clear any existing
3476 * interrupt inhibition.
3477 */
3478 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3479 {
3480 if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
3481 {
3482 /*
3483 * We can clear the inhibit force flag as even if we go back to the recompiler
3484 * without executing guest code in AMD-V, the flag's condition to be cleared is
3485 * met and thus the cleared state is correct.
3486 */
3487 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3488 return false;
3489 }
3490 return true;
3491 }
3492 return false;
3493}
3494
3495
3496/**
3497 * Sets the virtual interrupt intercept control in the VMCB.
3498 *
3499 * @param pVCpu The cross context virtual CPU structure.
3500 * @param pVmcb Pointer to the VM control block.
3501 */
3502static void hmR0SvmSetIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3503{
3504 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3505
3506 /*
3507 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
3508 * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC
3509 * (updating ISR at the right time) and inject the interrupt.
3510 *
3511     * When AVIC is supported, we could make use of asynchronous delivery without a
3512     * #VMEXIT and we would be passing the AVIC page to SVM.
3513 *
3514 * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt
3515 * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
3516 */
3517 Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
3518 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
3519 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3520 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3521 Log4(("Set VINTR intercept\n"));
3522}
3523
3524
3525/**
3526 * Clears the virtual interrupt intercept control in the VMCB as
3527 * we have figured the guest is unable to process any interrupts
3528 * at this point in time.
3529 *
3530 * @param pVCpu The cross context virtual CPU structure.
3531 * @param pVmcb Pointer to the VM control block.
3532 */
3533static void hmR0SvmClearIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3534{
3535 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3536
3537 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
3538 if ( pVmcbCtrl->IntCtrl.n.u1VIrqPending
3539 || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
3540 {
3541 pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
3542 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3543 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3544 Log4(("Cleared VINTR intercept\n"));
3545 }
3546}
3547
3548
3549/**
3550 * Evaluates the event to be delivered to the guest and sets it as the pending
3551 * event.
3552 *
3553 * @returns Strict VBox status code.
3554 * @param pVCpu The cross context virtual CPU structure.
3555 * @param pSvmTransient Pointer to the SVM transient structure.
3556 */
3557static VBOXSTRICTRC hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
3558{
3559 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3560 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
3561 | CPUMCTX_EXTRN_RFLAGS
3562 | CPUMCTX_EXTRN_INHIBIT_INT
3563 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
3564
3565 Assert(!pVCpu->hm.s.Event.fPending);
3566 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3567 Assert(pVmcb);
3568
3569 bool const fGif = CPUMGetGuestGif(pCtx);
3570 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3571 bool const fBlockNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3572
3573 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
3574 fGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
3575 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
3576
3577 /** @todo SMI. SMIs take priority over NMIs. */
3578
3579 /*
3580 * Check if the guest or nested-guest can receive NMIs.
3581 * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
3582 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
3583 */
3584 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
3585 && !fBlockNmi)
3586 {
3587 if ( fGif
3588 && !fIntShadow)
3589 {
3590#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3591 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
3592 {
3593 Log4(("Intercepting NMI -> #VMEXIT\n"));
3594 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3595 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
3596 }
3597#endif
3598 Log4(("Setting NMI pending for injection\n"));
3599 SVMEVENT Event;
3600 Event.u = 0;
3601 Event.n.u1Valid = 1;
3602 Event.n.u8Vector = X86_XCPT_NMI;
3603 Event.n.u3Type = SVM_EVENT_NMI;
3604 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3605 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3606 }
3607 else if (!fGif)
3608 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3609 else if (!pSvmTransient->fIsNestedGuest)
3610 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3611 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3612 }
3613 /*
3614 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
3615 * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
3616 * it from the APIC device.
3617 *
3618 * For nested-guests, physical interrupts always take priority over virtual interrupts.
3619 * We don't need to inject nested-guest virtual interrupts here, we can let the hardware
3620 * do that work when we execute nested-guest code esp. since all the required information
3621 * is in the VMCB, unlike physical interrupts where we need to fetch the interrupt from
3622 * the virtual interrupt controller.
3623 *
3624 * See AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
3625 */
3626 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3627 && !pVCpu->hm.s.fSingleInstruction)
3628 {
3629 bool const fBlockInt = !pSvmTransient->fIsNestedGuest ? !(pCtx->eflags.u32 & X86_EFL_IF)
3630 : CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx);
3631 if ( fGif
3632 && !fBlockInt
3633 && !fIntShadow)
3634 {
3635#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3636 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
3637 {
3638 Log4(("Intercepting INTR -> #VMEXIT\n"));
3639 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3640 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
3641 }
3642#endif
3643 uint8_t u8Interrupt;
3644 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3645 if (RT_SUCCESS(rc))
3646 {
3647 Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
3648 SVMEVENT Event;
3649 Event.u = 0;
3650 Event.n.u1Valid = 1;
3651 Event.n.u8Vector = u8Interrupt;
3652 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3653 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3654 }
3655 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3656 {
3657 /*
3658 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3659 * updated eventually when the TPR is written by the guest.
3660 */
3661 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3662 }
3663 else
3664 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3665 }
3666 else if (!fGif)
3667 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3668 else if (!pSvmTransient->fIsNestedGuest)
3669 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3670 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3671 }
3672
3673 return VINF_SUCCESS;
3674}
3675
3676
3677/**
3678 * Injects any pending events into the guest (or nested-guest).
3679 *
3680 * @param pVCpu The cross context virtual CPU structure.
3681 * @param pVmcb Pointer to the VM control block.
3682 *
3683 * @remarks Must only be called when we are guaranteed to enter
3684 * hardware-assisted SVM execution and not return to ring-3
3685 * prematurely.
3686 */
3687static void hmR0SvmInjectPendingEvent(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3688{
3689 Assert(!TRPMHasTrap(pVCpu));
3690 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3691
3692 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3693#ifdef VBOX_STRICT
3694 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3695 bool const fGif = CPUMGetGuestGif(pCtx);
3696 bool fAllowInt = fGif;
3697 if (fGif)
3698 {
3699 /*
3700 * For nested-guests we have no way to determine if we're injecting a physical or
3701 * virtual interrupt at this point. Hence the partial verification below.
3702 */
3703 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3704 fAllowInt = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx) || CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
3705 else
3706 fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
3707 }
3708#endif
3709
3710 if (pVCpu->hm.s.Event.fPending)
3711 {
3712 SVMEVENT Event;
3713 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3714 Assert(Event.n.u1Valid);
3715
3716 /*
3717 * Validate event injection pre-conditions.
3718 */
3719 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3720 {
3721 Assert(fAllowInt);
3722 Assert(!fIntShadow);
3723 }
3724 else if (Event.n.u3Type == SVM_EVENT_NMI)
3725 {
3726 Assert(fGif);
3727 Assert(!fIntShadow);
3728 }
3729
3730 /*
3731 * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We
3732 * do this only when we are surely going to inject the NMI as otherwise if we return
3733 * to ring-3 prematurely we could leave NMIs blocked indefinitely upon re-entry into
3734 * SVM R0.
3735 *
3736 * With VT-x, this is handled by the Guest interruptibility information VMCS field
3737 * which will set the VMCS field after actually delivering the NMI which we read on
3738 * VM-exit to determine the state.
3739 */
3740 if ( Event.n.u3Type == SVM_EVENT_NMI
3741 && Event.n.u8Vector == X86_XCPT_NMI
3742 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3743 {
3744 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3745 }
3746
3747 /*
3748 * Inject it (update VMCB for injection by the hardware).
3749 */
3750 Log4(("Injecting pending HM event\n"));
3751 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
3752 pVCpu->hm.s.Event.fPending = false;
3753
3754 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3755 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
3756 else
3757 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
3758 }
3759 else
3760 Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
3761
3762 /*
3763 * We could have injected an NMI through IEM and continue guest execution using
3764 * hardware-assisted SVM. In which case, we would not have any events pending (above)
3765 * but we still need to intercept IRET in order to eventually clear NMI inhibition.
3766 */
3767 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3768 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
3769
3770 /*
3771 * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
3772 *
3773 * For nested-guests: We need to update it too for the scenario where IEM executes
3774 * the nested-guest but execution later continues here with an interrupt shadow active.
3775 */
3776 pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
3777}
3778
3779
3780/**
3781 * Reports world-switch error and dumps some useful debug info.
3782 *
3783 * @param pVCpu The cross context virtual CPU structure.
3784 * @param rcVMRun The return code from VMRUN (or
3785 * VERR_SVM_INVALID_GUEST_STATE for invalid
3786 * guest-state).
3787 */
3788static void hmR0SvmReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun)
3789{
3790 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3791 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
3792 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3793
3794 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
3795 {
3796#ifdef VBOX_STRICT
3797 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
3798 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3799 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
3800 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
3801 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
3802 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
3803 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
3804 Log4(("ctrl.u32InterceptXcpt %#x\n", pVmcb->ctrl.u32InterceptXcpt));
3805 Log4(("ctrl.u64InterceptCtrl %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
3806 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
3807 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
3808 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
3809
3810 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
3811 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
3812 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
3813
3814 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
3815 Log4(("ctrl.IntCtrl.u1VIrqPending %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
3816 Log4(("ctrl.IntCtrl.u1VGif %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGif));
3817 Log4(("ctrl.IntCtrl.u6Reserved0 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
3818 Log4(("ctrl.IntCtrl.u4VIntrPrio %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
3819 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
3820 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
3821 Log4(("ctrl.IntCtrl.u1VIntrMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
3822 Log4(("ctrl.IntCtrl.u1VGifEnable %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
3823 Log4(("ctrl.IntCtrl.u5Reserved1 %#x\n", pVmcb->ctrl.IntCtrl.n.u5Reserved));
3824 Log4(("ctrl.IntCtrl.u8VIntrVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
3825 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
3826
3827 Log4(("ctrl.IntShadow.u1IntShadow %#x\n", pVmcb->ctrl.IntShadow.n.u1IntShadow));
3828 Log4(("ctrl.IntShadow.u1GuestIntMask %#x\n", pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
3829 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
3830 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
3831 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
3832 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
3833 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
3834 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
3835 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
3836 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
3837 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3838 Log4(("ctrl.NestedPagingCtrl.u1NestedPaging %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging));
3839 Log4(("ctrl.NestedPagingCtrl.u1Sev %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1Sev));
3840 Log4(("ctrl.NestedPagingCtrl.u1SevEs %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1SevEs));
3841 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
3842 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
3843 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
3844 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
3845 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
3846 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
3847
3848 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
3849
3850 Log4(("ctrl.LbrVirt.u1LbrVirt %#x\n", pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
3851 Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload %#x\n", pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));
3852
3853 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
3854 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
3855 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
3856 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
3857 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
3858 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
3859 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
3860 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
3861 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
3862 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
3863 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
3864 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
3865 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
3866 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
3867 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
3868 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
3869 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
3870 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
3871 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
3872 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
3873
3874 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
3875 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
3876
3877 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
3878 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
3879 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
3880 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
3881
3882 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
3883 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
3884
3885 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
3886 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
3887 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
3888 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
3889
3890 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
3891 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
3892 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
3893 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
3894 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
3895 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
3896 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
3897
3898 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
3899 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
3900 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
3901 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
3902
3903 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
3904 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
3905 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
3906
3907 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
3908 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
3909 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
3910 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
3911 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
3912 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
3913 Log4(("guest.u64PAT %#RX64\n", pVmcb->guest.u64PAT));
3914 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
3915 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
3916 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
3917 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
3918 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
3919
3920 NOREF(pVmcb);
3921#endif /* VBOX_STRICT */
3922 }
3923 else
3924 Log4Func(("rcVMRun=%d\n", rcVMRun));
3925}
3926
3927
3928/**
3929 * Check per-VM and per-VCPU force flag actions that require us to go back to
3930 * ring-3 for one reason or another.
3931 *
3932 * @returns Strict VBox status code (information status code included).
3933 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3934 * ring-3.
3935 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3936 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3937 * interrupts)
3938 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3939 * all EMTs to be in ring-3.
3940 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
3941 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3942 * to the EM loop.
3943 *
3944 * @param pVCpu The cross context virtual CPU structure.
3945 */
3946static VBOXSTRICTRC hmR0SvmCheckForceFlags(PVMCPUCC pVCpu)
3947{
3948 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3949
3950 /* Could happen as a result of longjump. */
3951 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
3952 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3953
3954 /* Update pending interrupts into the APIC's IRR. */
3955 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3956 APICUpdatePendingInterrupts(pVCpu);
3957
3958 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3959 if ( VM_FF_IS_ANY_SET(pVM, !pVCpu->hm.s.fSingleInstruction
3960 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3961 || VMCPU_FF_IS_ANY_SET(pVCpu, !pVCpu->hm.s.fSingleInstruction
3962 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3963 {
3964        /* Pending PGM CR3 sync. */
3965 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3966 {
3967 int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
3968 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3969 if (rc != VINF_SUCCESS)
3970 {
3971 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
3972 return rc;
3973 }
3974 }
3975
3976 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3977 /* -XXX- what was that about single stepping? */
3978 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3979 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3980 {
3981 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3982 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3983 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3984 return rc;
3985 }
3986
3987 /* Pending VM request packets, such as hardware interrupts. */
3988 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3989 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3990 {
3991 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
3992 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3993 return VINF_EM_PENDING_REQUEST;
3994 }
3995
3996 /* Pending PGM pool flushes. */
3997 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3998 {
3999 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
4000 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4001 return VINF_PGM_POOL_FLUSH_PENDING;
4002 }
4003
4004 /* Pending DMA requests. */
4005 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4006 {
4007 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
4008 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4009 return VINF_EM_RAW_TO_R3;
4010 }
4011 }
4012
4013 return VINF_SUCCESS;
4014}
4015
4016
4017/**
4018 * Does the preparations before executing guest code in AMD-V.
4019 *
4020 * This may cause longjmps to ring-3 and may even result in rescheduling to the
4021 * recompiler. We must be cautious about what we do here regarding committing
4022 * guest-state information into the VMCB, assuming we assuredly execute the guest
4023 * in AMD-V. If we fall back to the recompiler after updating the VMCB and
4024 * clearing the common-state (TRPM/forceflags), we must undo those changes so
4025 * that the recompiler can (and should) use them when it resumes guest
4026 * execution. Otherwise such operations must be done when we can no longer
4027 * exit to ring-3.
4028 *
4029 * @returns Strict VBox status code (informational status codes included).
4030 * @retval VINF_SUCCESS if we can proceed with running the guest.
4031 * @retval VINF_* scheduling changes, we have to go back to ring-3.
4032 *
4033 * @param pVCpu The cross context virtual CPU structure.
4034 * @param pSvmTransient Pointer to the SVM transient structure.
4035 */
4036static VBOXSTRICTRC hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4037{
4038 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4039
4040#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
4041 if (pSvmTransient->fIsNestedGuest)
4042 {
4043 Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
4044 return VINF_EM_RESCHEDULE_REM;
4045 }
4046#endif
4047
4048 /* Check force flag actions that might require us to go back to ring-3. */
4049 VBOXSTRICTRC rc = hmR0SvmCheckForceFlags(pVCpu);
4050 if (rc != VINF_SUCCESS)
4051 return rc;
4052
4053 if (TRPMHasTrap(pVCpu))
4054 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
4055 else if (!pVCpu->hm.s.Event.fPending)
4056 {
4057 rc = hmR0SvmEvaluatePendingEvent(pVCpu, pSvmTransient);
4058 if ( rc != VINF_SUCCESS
4059 || pSvmTransient->fIsNestedGuest != CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4060 {
4061 /* If a nested-guest VM-exit occurred, bail. */
4062 if (pSvmTransient->fIsNestedGuest)
4063 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4064 return rc;
4065 }
4066 }
4067
4068 /*
4069 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
4070 * Just do it in software, see @bugref{8411}.
4071 * NB: If we could continue a task switch exit we wouldn't need to do this.
4072 */
4073 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4074 if (RT_UNLIKELY( !g_fHmSvmFeatures
4075 && pVCpu->hm.s.Event.fPending
4076 && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
4077 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4078
4079#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4080 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4081 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
4082#endif
4083
4084#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4085 /*
4086 * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
4087 */
4088 if (pSvmTransient->fIsNestedGuest)
4089 hmR0SvmSetupVmcbNested(pVCpu);
4090#endif
4091
4092 /*
4093 * Export the guest state bits that are not shared with the host in any way as we can
4094 * longjmp or get preempted in the midst of exporting some of the state.
4095 */
4096 rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
4097 AssertRCReturn(rc, rc);
4098 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
4099
4100 /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
4101 Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
4102
4103 /*
4104 * If we're not intercepting TPR changes in the guest, save the guest TPR before the
4105 * world-switch so we can update it on the way back if the guest changed the TPR.
4106 */
4107 if (pVCpu->hmr0.s.svm.fSyncVTpr)
4108 {
4109 Assert(!pSvmTransient->fIsNestedGuest);
4110 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
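        /* With TPR patching (32-bit guests) the TPR value lives in the LSTAR MSR, otherwise read the VMCB's V_TPR. */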
4111 if (pVM->hm.s.fTprPatchingActive)
4112 pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
4113 else
4114 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
4115 }
4116
4117 /*
4118 * No longjmps to ring-3 from this point on!!!
4119 *
4120 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4121 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4122 */
4123 VMMRZCallRing3Disable(pVCpu);
4124
4125 /*
4126 * We disable interrupts so that we don't miss any interrupts that would flag preemption
4127 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
4128     * preemption disabled for a while. Since this is purely to aid the
4129     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
4130     * disable interrupts on NT.
4131 *
4132     * We need to check for force-flags that could've possibly been altered since we last
4133 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
4134 * see @bugref{6398}).
4135 *
4136 * We also check a couple of other force-flags as a last opportunity to get the EMT back
4137 * to ring-3 before executing guest code.
4138 */
4139 pSvmTransient->fEFlags = ASMIntDisableFlags();
4140 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4141 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4142 {
4143 ASMSetFlags(pSvmTransient->fEFlags);
4144 VMMRZCallRing3Enable(pVCpu);
4145 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4146 return VINF_EM_RAW_TO_R3;
4147 }
4148 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4149 {
4150 ASMSetFlags(pSvmTransient->fEFlags);
4151 VMMRZCallRing3Enable(pVCpu);
4152 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
4153 return VINF_EM_RAW_INTERRUPT;
4154 }
4155
4156 return VINF_SUCCESS;
4157}
4158
4159
4160/**
4161 * Prepares to run guest (or nested-guest) code in AMD-V once we've committed to
4162 * doing so.
4163 *
4164 * This means there is no backing out to ring-3 or anywhere else at this point.
4165 *
4166 * @param pVCpu The cross context virtual CPU structure.
4167 * @param pSvmTransient Pointer to the SVM transient structure.
4168 *
4169 * @remarks Called with preemption disabled.
4170 * @remarks No-long-jump zone!!!
4171 */
4172static void hmR0SvmPreRunGuestCommitted(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4173{
4174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4175 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4176
4177 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4178 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4179
4180 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4181 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4182
4183 hmR0SvmInjectPendingEvent(pVCpu, pVmcb);
4184
4185 if (!CPUMIsGuestFPUStateActive(pVCpu))
4186 {
4187 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4188 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4189 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4190 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
4191 }
4192
4193 /* Load the state shared between host and guest (FPU, debug). */
4194 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)
4195 hmR0SvmExportSharedState(pVCpu, pVmcb);
4196
4197 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */
4198 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
4199
4200 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
4201 RTCPUID const idHostCpu = pHostCpu->idCpu;
4202 bool const fMigratedHostCpu = idHostCpu != pVCpu->hmr0.s.idLastCpu;
4203
4204 /* Setup TSC offsetting. */
4205 if ( pSvmTransient->fUpdateTscOffsetting
4206 || fMigratedHostCpu)
4207 {
4208 hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
4209 pSvmTransient->fUpdateTscOffsetting = false;
4210 }
4211
4212 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
4213 if (!(pVmcb->ctrl.u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
4214 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4215 else
4216 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4217
4218    /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
4219 if (fMigratedHostCpu)
4220 pVmcb->ctrl.u32VmcbCleanBits = 0;
4221
4222 /* Store status of the shared guest-host state at the time of VMRUN. */
4223 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4224 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4225
4226#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4227 uint8_t *pbMsrBitmap;
4228 if (!pSvmTransient->fIsNestedGuest)
4229 pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
4230 else
4231 {
4232 /** @todo We could perhaps optimize this by monitoring if the guest modifies its
4233 * MSRPM and only perform this if it changed also use EVEX.POR when it
4234 * does. */
4235 hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);
4236
4237 /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
4238 pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
4239 pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
4240 }
4241#else
4242 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
4243#endif
4244
4245 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4246 /* Flush the appropriate tagged-TLB entries. */
4247 hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
4248 Assert(pVCpu->hmr0.s.idLastCpu == idHostCpu);
4249
4250 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4251
4252 TMNotifyStartOfExecution(pVM, pVCpu); /* Finally, notify TM to resume its clocks as we're about
4253 to start executing. */
4254
4255 /*
4256     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs
4257     * (that don't cause exits) read the guest MSR, see @bugref{3324}.
4258 *
4259 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4260 */
4261 if ( pVM->cpum.ro.HostFeatures.fRdTscP
4262 && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4263 {
4264 uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
4265 pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4266 if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
4267 ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
4268 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4269 pSvmTransient->fRestoreTscAuxMsr = true;
4270 }
4271 else
4272 {
4273 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4274 pSvmTransient->fRestoreTscAuxMsr = false;
4275 }
4276 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
4277
4278 /*
4279     * If VMCB Clean bits aren't supported by the CPU or exposed to the guest in the nested
4280 * virtualization case, mark all state-bits as dirty indicating to the CPU to re-load
4281 * from the VMCB.
4282 */
4283 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pSvmTransient->fIsNestedGuest);
4284 if (!fSupportsVmcbCleanBits)
4285 pVmcb->ctrl.u32VmcbCleanBits = 0;
4286}
4287
4288
4289/**
4290 * Wrapper for running the guest (or nested-guest) code in AMD-V.
4291 *
4292 * @returns VBox strict status code.
4293 * @param pVCpu The cross context virtual CPU structure.
4294 * @param HCPhysVmcb The host physical address of the VMCB.
4295 *
4296 * @remarks No-long-jump zone!!!
4297 */
4298DECLINLINE(int) hmR0SvmRunGuest(PVMCPUCC pVCpu, RTHCPHYS HCPhysVmcb)
4299{
4300 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
4301 pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
4302 return pVCpu->hmr0.s.svm.pfnVMRun(pVCpu->CTX_SUFF(pVM), pVCpu, HCPhysVmcb);
4303}
4304
4305
4306/**
4307 * Performs some essential restoration of state after running guest (or
4308 * nested-guest) code in AMD-V.
4309 *
4310 * @param pVCpu The cross context virtual CPU structure.
4311 * @param pSvmTransient Pointer to the SVM transient structure.
4312 * @param rcVMRun Return code of VMRUN.
4313 *
4314 * @remarks Called with interrupts disabled.
4315 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4316 * unconditionally when it is safe to do so.
4317 */
4318static void hmR0SvmPostRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, VBOXSTRICTRC rcVMRun)
4319{
4320 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4321
4322 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4323 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4324
4325 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4326 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
4327
4328 /* TSC read must be done early for maximum accuracy. */
4329 if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4330 {
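        /* pVCpu->hmr0.s.uTscExit holds the host TSC taken around #VMEXIT; adding the VMCB TSC offset
           gives the guest-visible TSC. */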
4331 if (!pSvmTransient->fIsNestedGuest)
4332 TMCpuTickSetLastSeen(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4333#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4334 else
4335 {
4336 /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
4337 uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4338 TMCpuTickSetLastSeen(pVCpu, uGstTsc);
4339 }
4340#endif
4341 }
4342
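    /* TSC_AUX was passed through to the guest (see hmR0SvmPreRunGuestCommitted), so it may have been
       modified; save the guest value and restore the host's. */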
4343 if (pSvmTransient->fRestoreTscAuxMsr)
4344 {
4345 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4346 CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4347 if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
4348 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
4349 }
4350
4351 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
4352 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4353 TMNotifyEndOfExecution(pVM, pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
4354 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4355
4356 Assert(!(ASMGetFlags() & X86_EFL_IF));
4357 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4358 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4359
4360 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4361 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4362 {
4363 Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", VBOXSTRICTRC_VAL(rcVMRun)));
4364 return;
4365 }
4366
4367 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
4368 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4369 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4370 pVmcbCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
4371
4372#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4373 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4374 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4375#else
4376 /*
4377 * Always import the following:
4378 *
4379 * - RIP for exit optimizations and evaluating event injection on re-entry.
4380 * - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug
4381 * state on preemption.
4382 * - Interrupt shadow, GIF for evaluating event injection on VM re-entry.
4383 * - CS for exit optimizations.
4384 * - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the
4385 * assembly switcher code.
4386 * - Shared state (only DR7 currently) for exporting shared debug state on preemption.
4387 */
4388 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
4389 | CPUMCTX_EXTRN_RFLAGS
4390 | CPUMCTX_EXTRN_RAX
4391 | CPUMCTX_EXTRN_RSP
4392 | CPUMCTX_EXTRN_CS
4393 | CPUMCTX_EXTRN_HWVIRT
4394 | CPUMCTX_EXTRN_INHIBIT_INT
4395 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
4396 | HMSVM_CPUMCTX_SHARED_STATE);
4397#endif
4398
4399 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
4400 && pVCpu->hmr0.s.svm.fSyncVTpr)
4401 {
4402 Assert(!pSvmTransient->fIsNestedGuest);
4403 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
4404 if ( pVM->hm.s.fTprPatchingActive
4405 && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
4406 {
4407 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
4408 AssertRC(rc);
4409 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4410 }
4411 /* Sync TPR when we aren't intercepting CR8 writes. */
4412 else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
4413 {
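            /* V_TPR holds the 4-bit task priority (CR8); the APIC TPR register keeps it in bits 7:4, hence the shift. */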
4414 int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
4415 AssertRC(rc);
4416 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4417 }
4418 }
4419
4420#ifdef DEBUG_ramshankar
4421 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4422 {
4423 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4424 hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
4425 0 /* uVerbose */);
4426 }
4427#endif
4428
4429 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
4430 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
4431 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, pVCpu->hmr0.s.uTscExit);
4432}
4433
4434
4435/**
4436 * Runs the guest code using AMD-V.
4437 *
4438 * @returns Strict VBox status code.
4439 * @param pVCpu The cross context virtual CPU structure.
4440 * @param pcLoops Pointer to the number of executed loops.
4441 */
4442static VBOXSTRICTRC hmR0SvmRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
4443{
4444 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
4445 Assert(pcLoops);
4446 Assert(*pcLoops <= cMaxResumeLoops);
4447
4448 SVMTRANSIENT SvmTransient;
4449 RT_ZERO(SvmTransient);
4450 SvmTransient.fUpdateTscOffsetting = true;
4451 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4452
4453 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
4454 for (;;)
4455 {
4456 Assert(!HMR0SuspendPending());
4457 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4458
4459        /* Preparatory work for running guest code; this may force us to return to
4460 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4461 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4462 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4463 if (rc != VINF_SUCCESS)
4464 break;
4465
4466 /*
4467 * No longjmps to ring-3 from this point on!!!
4468 *
4469 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4470 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4471 */
4472 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4473 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
4474
4475 /* Restore any residual host-state and save any bits shared between host and guest
4476 into the guest-CPU state. Re-enables interrupts! */
4477 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4478
4479 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4480 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4481 {
4482 if (rc == VINF_SUCCESS)
4483 rc = VERR_SVM_INVALID_GUEST_STATE;
4484 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
4485 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
4486 break;
4487 }
4488
4489 /* Handle the #VMEXIT. */
4490 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4491 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4492 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
4493 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
4494 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4495 if (rc != VINF_SUCCESS)
4496 break;
4497 if (++(*pcLoops) >= cMaxResumeLoops)
4498 {
4499 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4500 rc = VINF_EM_RAW_INTERRUPT;
4501 break;
4502 }
4503 }
4504
4505 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4506 return rc;
4507}
4508
4509
4510#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4511/**
4512 * Runs the nested-guest code using AMD-V.
4513 *
4514 * @returns Strict VBox status code.
4515 * @param pVCpu The cross context virtual CPU structure.
4516 * @param pcLoops Pointer to the number of executed loops. If we're switching
4517 * from the guest-code execution loop to this nested-guest
4518 * execution loop pass the remainder value, else pass 0.
4519 */
4520static VBOXSTRICTRC hmR0SvmRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
4521{
4522 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4523 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4524 Assert(pcLoops);
4525 Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops);
4526 /** @todo r=bird: Sharing this with ring-3 isn't safe in the long run, I fear... */
4527 RTHCPHYS const HCPhysVmcb = GVMMR0ConvertGVMPtr2HCPhys(pVCpu->pGVM, &pCtx->hwvirt.svm.Vmcb);
4528
4529 SVMTRANSIENT SvmTransient;
4530 RT_ZERO(SvmTransient);
4531 SvmTransient.fUpdateTscOffsetting = true;
4532 SvmTransient.pVmcb = &pCtx->hwvirt.svm.Vmcb;
4533 SvmTransient.fIsNestedGuest = true;
4534
4535    /* Set up the pointer so PGM/IEM can query #VMEXIT auxiliary info on demand in ring-0. */
4536 pVCpu->hmr0.s.svm.pSvmTransient = &SvmTransient;
4537
4538 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_4;
4539 for (;;)
4540 {
4541 Assert(!HMR0SuspendPending());
4542 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4543
4544        /* Preparatory work for running nested-guest code; this may force us to return to
4545 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4546 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4547 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4548 if ( rc != VINF_SUCCESS
4549 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4550 break;
4551
4552 /*
4553 * No longjmps to ring-3 from this point on!!!
4554 *
4555 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4556 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4557 */
4558 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4559
4560 rc = hmR0SvmRunGuest(pVCpu, HCPhysVmcb);
4561
4562 /* Restore any residual host-state and save any bits shared between host and guest
4563 into the guest-CPU state. Re-enables interrupts! */
4564 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4565
4566 if (RT_LIKELY( rc == VINF_SUCCESS
4567 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
4568 { /* extremely likely */ }
4569 else
4570 {
4571 /* VMRUN failed, shouldn't really happen, Guru. */
4572 if (rc != VINF_SUCCESS)
4573 break;
4574
4575 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
4576 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4577 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
4578 rc = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0);
4579 break;
4580 }
4581
4582 /* Handle the #VMEXIT. */
4583 HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4584 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4585 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, &pCtx->hwvirt.svm.Vmcb);
4586 rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient);
4587 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4588 if (rc == VINF_SUCCESS)
4589 {
4590 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4591 {
4592 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4593 rc = VINF_SVM_VMEXIT;
4594 }
4595 else
4596 {
4597 if (++(*pcLoops) <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops)
4598 continue;
4599 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4600 rc = VINF_EM_RAW_INTERRUPT;
4601 }
4602 }
4603 else
4604 Assert(rc != VINF_SVM_VMEXIT);
4605 break;
4606 /** @todo NSTSVM: handle single-stepping. */
4607 }
4608
4609    /* Ensure the #VMEXIT auxiliary info is no longer available. */
4610 pVCpu->hmr0.s.svm.pSvmTransient = NULL;
4611
4612 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4613 return rc;
4614}
4615#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
4616
4617
4618/**
4619 * Checks if any expensive dtrace probes are enabled and we should go to the
4620 * debug loop.
4621 *
4622 * @returns true if we should use debug loop, false if not.
4623 */
4624static bool hmR0SvmAnyExpensiveProbesEnabled(void)
4625{
4626 /* It's probably faster to OR the raw 32-bit counter variables together.
4627 Since the variables are in an array and the probes are next to one
4628 another (more or less), we have good locality. So, better read
4629       eight or nine cache lines every time and only have one conditional, than
4630 128+ conditionals, right? */
4631 return ( VBOXVMM_R0_HMSVM_VMEXIT_ENABLED_RAW() /* expensive too due to context */
4632 | VBOXVMM_XCPT_DE_ENABLED_RAW()
4633 | VBOXVMM_XCPT_DB_ENABLED_RAW()
4634 | VBOXVMM_XCPT_BP_ENABLED_RAW()
4635 | VBOXVMM_XCPT_OF_ENABLED_RAW()
4636 | VBOXVMM_XCPT_BR_ENABLED_RAW()
4637 | VBOXVMM_XCPT_UD_ENABLED_RAW()
4638 | VBOXVMM_XCPT_NM_ENABLED_RAW()
4639 | VBOXVMM_XCPT_DF_ENABLED_RAW()
4640 | VBOXVMM_XCPT_TS_ENABLED_RAW()
4641 | VBOXVMM_XCPT_NP_ENABLED_RAW()
4642 | VBOXVMM_XCPT_SS_ENABLED_RAW()
4643 | VBOXVMM_XCPT_GP_ENABLED_RAW()
4644 | VBOXVMM_XCPT_PF_ENABLED_RAW()
4645 | VBOXVMM_XCPT_MF_ENABLED_RAW()
4646 | VBOXVMM_XCPT_AC_ENABLED_RAW()
4647 | VBOXVMM_XCPT_XF_ENABLED_RAW()
4648 | VBOXVMM_XCPT_VE_ENABLED_RAW()
4649 | VBOXVMM_XCPT_SX_ENABLED_RAW()
4650 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
4651 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
4652 ) != 0
4653 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
4654 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
4655 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
4656 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
4657 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
4658 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
4659 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
4660 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
4661 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
4662 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
4663 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
4664 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
4665 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
4666 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
4667 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
4668 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
4669 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
4670 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
4671 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
4672 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
4673 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
4674 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
4675 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
4676 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
4677 | VBOXVMM_INSTR_STR_ENABLED_RAW()
4678 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
4679 //| VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
4680 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
4681 //| VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
4682 //| VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
4683 //| VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
4684 //| VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
4685 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
4686 | VBOXVMM_INSTR_SVM_VMRUN_ENABLED_RAW()
4687 | VBOXVMM_INSTR_SVM_VMLOAD_ENABLED_RAW()
4688 | VBOXVMM_INSTR_SVM_VMSAVE_ENABLED_RAW()
4689 | VBOXVMM_INSTR_SVM_STGI_ENABLED_RAW()
4690 | VBOXVMM_INSTR_SVM_CLGI_ENABLED_RAW()
4691 ) != 0
4692 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
4693 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
4694 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
4695 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
4696 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
4697 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
4698 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
4699 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
4700 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
4701 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
4702 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
4703 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
4704 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
4705 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
4706 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
4707 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
4708 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
4709 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
4710 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
4711 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
4712 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
4713 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
4714 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
4715 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
4716 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
4717 | VBOXVMM_EXIT_STR_ENABLED_RAW()
4718 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
4719 //| VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
4720 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
4721 //| VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
4722 //| VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
4723 //| VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
4724 //| VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
4725 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
4726 | VBOXVMM_EXIT_SVM_VMRUN_ENABLED_RAW()
4727 | VBOXVMM_EXIT_SVM_VMLOAD_ENABLED_RAW()
4728 | VBOXVMM_EXIT_SVM_VMSAVE_ENABLED_RAW()
4729 | VBOXVMM_EXIT_SVM_STGI_ENABLED_RAW()
4730 | VBOXVMM_EXIT_SVM_CLGI_ENABLED_RAW()
4731 ) != 0;
4732}
4733
4734
4735/**
4736 * Runs the guest code using AMD-V.
4737 *
4738 * @returns Strict VBox status code.
4739 * @param pVCpu The cross context virtual CPU structure.
4740 */
4741VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPUCC pVCpu)
4742{
4743 AssertPtr(pVCpu);
4744 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4745 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4746 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4747 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4748
4749 uint32_t cLoops = 0;
4750 VBOXSTRICTRC rc;
4751 for (;;)
4752 {
4753#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4754 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
4755#else
4756 NOREF(pCtx);
4757 bool const fInNestedGuestMode = false;
4758#endif
4759 if (!fInNestedGuestMode)
4760 {
4761 if ( !pVCpu->hm.s.fUseDebugLoop
4762 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0SvmAnyExpensiveProbesEnabled())
4763 && !DBGFIsStepping(pVCpu)
4764 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
4765 rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
4766 else
4767 rc = hmR0SvmRunGuestCodeDebug(pVCpu, &cLoops);
4768 }
4769#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4770 else
4771 rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops);
4772
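        /* VINF_SVM_VMRUN: the guest executed VMRUN, so re-enter the loop and run the
           nested-guest.  VINF_SVM_VMEXIT: the nested-guest performed a #VMEXIT back to
           the guest, so re-enter the loop and resume regular guest execution.  Any other
           status falls out of the loop below. */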
4773 if (rc == VINF_SVM_VMRUN)
4774 {
4775 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4776 continue;
4777 }
4778 if (rc == VINF_SVM_VMEXIT)
4779 {
4780 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4781 continue;
4782 }
4783#endif
4784 break;
4785 }
4786
4787 /* Fixup error codes. */
4788 if (rc == VERR_EM_INTERPRETER)
4789 rc = VINF_EM_RAW_EMULATE_INSTR;
4790 else if (rc == VINF_EM_RESET)
4791 rc = VINF_EM_TRIPLE_FAULT;
4792
4793 /* Prepare to return to ring-3. This will remove longjmp notifications. */
4794 rc = hmR0SvmExitToRing3(pVCpu, rc);
4795 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4796 Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
4797 return rc;
4798}
4799
4800#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4801
4802/**
4803 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
4804 *
 * @returns true if the I/O access should cause a nested-guest \#VMEXIT, false otherwise.
4805 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
4806 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO.
4807 */
4808static bool hmR0SvmIsIoInterceptSet(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
4809{
4810 const uint16_t u16Port = pIoExitInfo->n.u16Port;
4811 const SVMIOIOTYPE enmIoType = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
4812 const uint8_t cbReg = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
4813 const uint8_t cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
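    /* Note: both size fields are one-hot encoded (1, 2 or 4): the operand size maps
       directly to a byte count, while the address size is shifted left by four above
       so that it is expressed in bits (16/32/64). */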
4814 const uint8_t iEffSeg = pIoExitInfo->n.u3Seg;
4815 const bool fRep = pIoExitInfo->n.u1Rep;
4816 const bool fStrIo = pIoExitInfo->n.u1Str;
4817
4818 return CPUMIsSvmIoInterceptSet(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
4819 NULL /* pIoExitInfo */);
4820}
4821
4822
4823/**
4824 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
4825 * SVM_EXIT_INVALID).
4826 *
4827 * @returns VBox status code (informational status codes included).
4828 * @param pVCpu The cross context virtual CPU structure.
4829 * @param pSvmTransient Pointer to the SVM transient structure.
4830 */
4831static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4832{
4833 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
4834 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
4835 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
4836
4837 /*
4838 * We import the complete state here because we use separate VMCBs for the guest and the
4839     * nested-guest, and the guest's VMCB is used after the #VMEXIT. We could only limit this to
4840     * the #VMEXIT-specific state if the same VMCB were used for both the guest and the nested-guest.
4841 */
4842#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4843 do { \
4844 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
4845 return IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
4846 } while (0)
4847
4848 /*
4849 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the
4850 * nested-guest. If it isn't, it should be handled by the (outer) guest.
4851 */
4852 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
4853 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4854 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4855 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
4856 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1;
4857 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;
4858
4859 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
4860 switch (uExitCode)
4861 {
4862 case SVM_EXIT_CPUID:
4863 {
4864 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
4865 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4866 return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
4867 }
4868
4869 case SVM_EXIT_RDTSC:
4870 {
4871 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
4872 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4873 return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
4874 }
4875
4876 case SVM_EXIT_RDTSCP:
4877 {
4878 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
4879 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4880 return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
4881 }
4882
4883 case SVM_EXIT_MONITOR:
4884 {
4885 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
4886 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4887 return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
4888 }
4889
4890 case SVM_EXIT_MWAIT:
4891 {
4892 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
4893 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4894 return hmR0SvmExitMwait(pVCpu, pSvmTransient);
4895 }
4896
4897 case SVM_EXIT_HLT:
4898 {
4899 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
4900 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4901 return hmR0SvmExitHlt(pVCpu, pSvmTransient);
4902 }
4903
4904 case SVM_EXIT_MSR:
4905 {
4906 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
4907 {
4908 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
4909 uint16_t offMsrpm;
4910 uint8_t uMsrpmBit;
4911 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
4912 if (RT_SUCCESS(rc))
4913 {
4914 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
4915 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
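                    /* Each MSR occupies two adjacent bits in the MSR permission map: the
                       lower bit intercepts reads (RDMSR) and the higher bit intercepts
                       writes (WRMSR), hence the uMsrpmBit + 1 in the write check below. */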
4916
4917 uint8_t const * const pbMsrBitmap = &pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm];
4918 bool const fInterceptRead = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
4919 bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
4920
4921 if ( (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4922 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
4923 {
4924 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4925 }
4926 }
4927 else
4928 {
4929 /*
4930 * MSRs not covered by the MSRPM automatically cause an #VMEXIT.
4931 * See AMD-V spec. "15.11 MSR Intercepts".
4932 */
4933 Assert(rc == VERR_OUT_OF_RANGE);
4934 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4935 }
4936 }
4937 return hmR0SvmExitMsr(pVCpu, pSvmTransient);
4938 }
4939
4940 case SVM_EXIT_IOIO:
4941 {
4942 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
4943 {
4944 SVMIOIOEXITINFO IoExitInfo;
4945 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
4946 bool const fIntercept = hmR0SvmIsIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, &IoExitInfo);
4947 if (fIntercept)
4948 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4949 }
4950 return hmR0SvmExitIOInstr(pVCpu, pSvmTransient);
4951 }
4952
4953 case SVM_EXIT_XCPT_PF:
4954 {
4955 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4956 if (pVM->hmr0.s.fNestedPaging)
4957 {
4958 uint32_t const u32ErrCode = pVmcbNstGstCtrl->u64ExitInfo1;
4959 uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
4960
4961 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
4962 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
4963 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
4964
4965 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
4966 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
4967 hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
4968 return VINF_SUCCESS;
4969 }
4970 return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
4971 }
4972
4973 case SVM_EXIT_XCPT_UD:
4974 {
4975 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
4976 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4977 hmR0SvmSetPendingXcptUD(pVCpu);
4978 return VINF_SUCCESS;
4979 }
4980
4981 case SVM_EXIT_XCPT_MF:
4982 {
4983 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
4984 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4985 return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
4986 }
4987
4988 case SVM_EXIT_XCPT_DB:
4989 {
4990 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
4991 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4992 return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
4993 }
4994
4995 case SVM_EXIT_XCPT_AC:
4996 {
4997 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
4998 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4999 return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
5000 }
5001
5002 case SVM_EXIT_XCPT_BP:
5003 {
5004 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
5005 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5006 return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
5007 }
5008
5009 case SVM_EXIT_READ_CR0:
5010 case SVM_EXIT_READ_CR3:
5011 case SVM_EXIT_READ_CR4:
5012 {
5013 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
5014 if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
5015 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5016 return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
5017 }
5018
5019 case SVM_EXIT_CR0_SEL_WRITE:
5020 {
5021 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5022 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5023 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5024 }
5025
5026 case SVM_EXIT_WRITE_CR0:
5027 case SVM_EXIT_WRITE_CR3:
5028 case SVM_EXIT_WRITE_CR4:
5029 case SVM_EXIT_WRITE_CR8: /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */
5030 {
5031 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5032 Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
5033
5034 if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
5035 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5036 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5037 }
5038
5039 case SVM_EXIT_PAUSE:
5040 {
5041 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
5042 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5043 return hmR0SvmExitPause(pVCpu, pSvmTransient);
5044 }
5045
5046 case SVM_EXIT_VINTR:
5047 {
5048 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
5049 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5050 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5051 }
5052
5053 case SVM_EXIT_INTR:
5054 case SVM_EXIT_NMI:
5055 case SVM_EXIT_SMI:
5056 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5057 {
5058 /*
5059 * We shouldn't direct physical interrupts, NMIs, SMIs to the nested-guest.
5060 *
5061 * Although we don't intercept SMIs, the nested-guest might. Therefore, we might
5062 * get an SMI #VMEXIT here so simply ignore rather than causing a corresponding
5063             * get an SMI #VMEXIT here, so simply ignore it rather than causing a corresponding
5064 *
5065 * We shall import the complete state here as we may cause #VMEXITs from ring-3
5066 * while trying to inject interrupts, see comment at the top of this function.
5067 */
5068 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_ALL);
5069 return hmR0SvmExitIntr(pVCpu, pSvmTransient);
5070 }
5071
5072 case SVM_EXIT_FERR_FREEZE:
5073 {
5074 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
5075 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5076 return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
5077 }
5078
5079 case SVM_EXIT_INVLPG:
5080 {
5081 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
5082 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5083 return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
5084 }
5085
5086 case SVM_EXIT_WBINVD:
5087 {
5088 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
5089 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5090 return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
5091 }
5092
5093 case SVM_EXIT_INVD:
5094 {
5095 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
5096 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5097 return hmR0SvmExitInvd(pVCpu, pSvmTransient);
5098 }
5099
5100 case SVM_EXIT_RDPMC:
5101 {
5102 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
5103 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5104 return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
5105 }
5106
5107 default:
5108 {
5109 switch (uExitCode)
5110 {
5111 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5112 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5113 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5114 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5115 {
5116 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
5117 if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
5118 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5119 return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
5120 }
5121
5122 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5123 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5124 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5125 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5126 {
5127 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
5128 if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
5129 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5130 return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
5131 }
5132
5133 case SVM_EXIT_XCPT_DE:
5134 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5135 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5136 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5137 case SVM_EXIT_XCPT_OF:
5138 case SVM_EXIT_XCPT_BR:
5139 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5140 case SVM_EXIT_XCPT_NM:
5141 case SVM_EXIT_XCPT_DF:
5142 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5143 case SVM_EXIT_XCPT_TS:
5144 case SVM_EXIT_XCPT_NP:
5145 case SVM_EXIT_XCPT_SS:
5146 case SVM_EXIT_XCPT_GP:
5147 /* SVM_EXIT_XCPT_PF: */ /* Handled above. */
5148 case SVM_EXIT_XCPT_15: /* Reserved. */
5149 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5150 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5151 case SVM_EXIT_XCPT_MC:
5152 case SVM_EXIT_XCPT_XF:
5153 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5154 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5155 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5156 {
5157 uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
5158 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
5159 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5160 return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
5161 }
5162
5163 case SVM_EXIT_XSETBV:
5164 {
5165 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
5166 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5167 return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
5168 }
5169
5170 case SVM_EXIT_TASK_SWITCH:
5171 {
5172 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
5173 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5174 return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
5175 }
5176
5177 case SVM_EXIT_IRET:
5178 {
5179 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
5180 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5181 return hmR0SvmExitIret(pVCpu, pSvmTransient);
5182 }
5183
5184 case SVM_EXIT_SHUTDOWN:
5185 {
5186 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
5187 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5188 return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
5189 }
5190
5191 case SVM_EXIT_VMMCALL:
5192 {
5193 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
5194 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5195 return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
5196 }
5197
5198 case SVM_EXIT_CLGI:
5199 {
5200 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
5201 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5202 return hmR0SvmExitClgi(pVCpu, pSvmTransient);
5203 }
5204
5205 case SVM_EXIT_STGI:
5206 {
5207 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
5208 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5209 return hmR0SvmExitStgi(pVCpu, pSvmTransient);
5210 }
5211
5212 case SVM_EXIT_VMLOAD:
5213 {
5214 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
5215 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5216 return hmR0SvmExitVmload(pVCpu, pSvmTransient);
5217 }
5218
5219 case SVM_EXIT_VMSAVE:
5220 {
5221 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
5222 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5223 return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
5224 }
5225
5226 case SVM_EXIT_INVLPGA:
5227 {
5228 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
5229 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5230 return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
5231 }
5232
5233 case SVM_EXIT_VMRUN:
5234 {
5235 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
5236 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5237 return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
5238 }
5239
5240 case SVM_EXIT_RSM:
5241 {
5242 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
5243 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5244 hmR0SvmSetPendingXcptUD(pVCpu);
5245 return VINF_SUCCESS;
5246 }
5247
5248 case SVM_EXIT_SKINIT:
5249 {
5250 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
5251 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5252 hmR0SvmSetPendingXcptUD(pVCpu);
5253 return VINF_SUCCESS;
5254 }
5255
5256 case SVM_EXIT_NPF:
5257 {
5258 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
5259 return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
5260 }
5261
5262 case SVM_EXIT_INIT: /* We shouldn't get INIT signals while executing a nested-guest. */
5263 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5264
5265 default:
5266 {
5267 AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
5268 pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
5269 return VERR_SVM_UNKNOWN_EXIT;
5270 }
5271 }
5272 }
5273 }
5274 /* not reached */
5275
5276# undef NST_GST_VMEXIT_CALL_RET
5277}
5278
5279#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5280
5281/** @def VMEXIT_CALL_RET
5282 * Used by hmR0SvmHandleExit and hmR0SvmDebugHandleExit to dispatch a \#VMEXIT handler;
 * the DEBUG_ramshankar variant imports the complete guest state before the call and marks
 * all guest state as changed afterwards.
5283 */
5284#ifdef DEBUG_ramshankar
5285# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
5286 do { \
5287 if ((a_fDbg) == 1) \
5288 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
5289 int rc = a_CallExpr; \
5290 if ((a_fDbg) == 1) \
5291 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
5292 return rc; \
5293 } while (0)
5294#else
5295# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
5296#endif
5297
5298/**
5299 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
5300 *
5301 * @returns Strict VBox status code (informational status codes included).
5302 * @param pVCpu The cross context virtual CPU structure.
5303 * @param pSvmTransient Pointer to the SVM transient structure.
5304 */
5305static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5306{
5307 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
5308 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
5309
5310 /*
5311 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
5312 * for most guests under normal workloads (for some definition of "normal").
5313 */
5314 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
5315 switch (uExitCode)
5316 {
5317 case SVM_EXIT_NPF: VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
5318 case SVM_EXIT_IOIO: VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
5319 case SVM_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
5320 case SVM_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
5321 case SVM_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
5322 case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
5323 case SVM_EXIT_MSR: VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
5324 case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
5325 case SVM_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
5326 case SVM_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));
5327
5328 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5329 case SVM_EXIT_INTR:
5330 case SVM_EXIT_NMI: VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));
5331
5332 case SVM_EXIT_READ_CR0:
5333 case SVM_EXIT_READ_CR3:
5334 case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));
5335
5336 case SVM_EXIT_CR0_SEL_WRITE:
5337 case SVM_EXIT_WRITE_CR0:
5338 case SVM_EXIT_WRITE_CR3:
5339 case SVM_EXIT_WRITE_CR4:
5340 case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));
5341
5342 case SVM_EXIT_VINTR: VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
5343 case SVM_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
5344 case SVM_EXIT_VMMCALL: VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
5345 case SVM_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
5346 case SVM_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
5347 case SVM_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
5348 case SVM_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
5349 case SVM_EXIT_IRET: VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
5350 case SVM_EXIT_XCPT_UD: VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
5351 case SVM_EXIT_XCPT_MF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
5352 case SVM_EXIT_XCPT_DB: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
5353 case SVM_EXIT_XCPT_AC: VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
5354 case SVM_EXIT_XCPT_BP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
5355 case SVM_EXIT_XCPT_GP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptGP(pVCpu, pSvmTransient));
5356 case SVM_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
5357 case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));
5358
5359 default:
5360 {
5361 switch (pSvmTransient->u64ExitCode)
5362 {
5363 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5364 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5365 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5366 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5367 VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));
5368
5369 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5370 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5371 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5372 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5373 VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));
5374
5375 case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
5376 case SVM_EXIT_SHUTDOWN: VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));
5377
5378 case SVM_EXIT_SMI:
5379 case SVM_EXIT_INIT:
5380 {
5381 /*
5382                 * We don't intercept SMIs. As for INIT signals, they really shouldn't ever happen here.
5383                 * If one ever does, we want to know about it, so log the exit code and bail.
5384 */
5385 VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
5386 }
5387
5388#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5389 case SVM_EXIT_CLGI: VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
5390 case SVM_EXIT_STGI: VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
5391 case SVM_EXIT_VMLOAD: VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
5392 case SVM_EXIT_VMSAVE: VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
5393 case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
5394 case SVM_EXIT_VMRUN: VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
5395#else
5396 case SVM_EXIT_CLGI:
5397 case SVM_EXIT_STGI:
5398 case SVM_EXIT_VMLOAD:
5399 case SVM_EXIT_VMSAVE:
5400 case SVM_EXIT_INVLPGA:
5401 case SVM_EXIT_VMRUN:
5402#endif
5403 case SVM_EXIT_RSM:
5404 case SVM_EXIT_SKINIT:
5405 {
5406 hmR0SvmSetPendingXcptUD(pVCpu);
5407 return VINF_SUCCESS;
5408 }
5409
5410 /*
5411                 * The remaining exits should only be possible when debugging or dtracing.
5412 */
5413 case SVM_EXIT_XCPT_DE:
5414 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5415 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5416 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5417 case SVM_EXIT_XCPT_OF:
5418 case SVM_EXIT_XCPT_BR:
5419 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5420 case SVM_EXIT_XCPT_NM:
5421 case SVM_EXIT_XCPT_DF:
5422 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5423 case SVM_EXIT_XCPT_TS:
5424 case SVM_EXIT_XCPT_NP:
5425 case SVM_EXIT_XCPT_SS:
5426 /* SVM_EXIT_XCPT_GP: */ /* Handled above. */
5427 /* SVM_EXIT_XCPT_PF: */
5428 case SVM_EXIT_XCPT_15: /* Reserved. */
5429 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5430 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5431 case SVM_EXIT_XCPT_MC:
5432 case SVM_EXIT_XCPT_XF:
5433 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5434 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5435 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5436 VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
5437
5438 case SVM_EXIT_SWINT: VMEXIT_CALL_RET(0, hmR0SvmExitSwInt(pVCpu, pSvmTransient));
5439 case SVM_EXIT_TR_READ: VMEXIT_CALL_RET(0, hmR0SvmExitTrRead(pVCpu, pSvmTransient));
5440 case SVM_EXIT_TR_WRITE: VMEXIT_CALL_RET(0, hmR0SvmExitTrWrite(pVCpu, pSvmTransient)); /* Also OS/2 TLB workaround. */
5441
5442 default:
5443 {
5444 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
5445 pVCpu->hm.s.u32HMError = uExitCode;
5446 return VERR_SVM_UNKNOWN_EXIT;
5447 }
5448 }
5449 }
5450 }
5451 /* not reached */
5452}
5453
5454
5455/** @name Execution loop for single stepping, DBGF events and expensive Dtrace probes.
5456 *
5457 * The following few functions and associated structure contain the bloat
5458 * necessary for providing detailed debug events and dtrace probes as well as
5459 * reliable host side single stepping. This works on the principle of
5460 * "subclassing" the normal execution loop and workers. We replace the loop
5461 * method completely and override selected helpers to add necessary adjustments
5462 * to their core operation.
5463 *
5464 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
5465 * any performance for debug and analysis features.
5466 *
5467 * @{
5468 */
5469
5470/**
5471 * Transient per-VCPU debug state of the VMCB and related info we save/restore in
5472 * the debug run loop.
5473 */
5474typedef struct SVMRUNDBGSTATE
5475{
5476 /** The initial SVMVMCBCTRL::u64InterceptCtrl value (helps with restore). */
5477 uint64_t bmInterceptInitial;
5478 /** The initial SVMVMCBCTRL::u32InterceptXcpt value (helps with restore). */
5479 uint32_t bmXcptInitial;
5480 /** The initial SVMVMCBCTRL::u16InterceptRdCRx value (helps with restore). */
5481 uint16_t bmInterceptRdCRxInitial;
5482 /** The initial SVMVMCBCTRL::u16InterceptWrCRx value (helps with restore). */
5483 uint16_t bmInterceptWrCRxInitial;
5484 /** The initial SVMVMCBCTRL::u16InterceptRdDRx value (helps with restore). */
5485 uint16_t bmInterceptRdDRxInitial;
5486 /** The initial SVMVMCBCTRL::u16InterceptWrDRx value (helps with restore). */
5487 uint16_t bmInterceptWrDRxInitial;
5488
5489 /** Whether we've actually modified the intercept control qword. */
5490 bool fModifiedInterceptCtrl : 1;
5491 /** Whether we've actually modified the exception bitmap. */
5492 bool fModifiedXcptBitmap : 1;
5493 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdCRx. */
5494 bool fModifiedInterceptRdCRx : 1;
5495 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrCRx. */
5496 bool fModifiedInterceptWrCRx : 1;
5497 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdDRx. */
5498 bool fModifiedInterceptRdDRx : 1;
5499 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrDRx. */
5500 bool fModifiedInterceptWrDRx : 1;
5501
5502 /** The CS we started executing with. */
5503 uint16_t uCsStart;
5504 /** The RIP we started executing at. This is for detecting that we stepped. */
5505 uint64_t uRipStart;
5506
5507 /** The sequence number of the Dtrace provider settings the state was
5508 * configured against. */
5509 uint32_t uDtraceSettingsSeqNo;
5510 /** Extra stuff we need in SVMVMCBCTRL::u32InterceptXcpt. */
5511 uint32_t bmXcptExtra;
5512 /** Extra stuff we need in SVMVMCBCTRL::u64InterceptCtrl. */
5513 uint64_t bmInterceptExtra;
5514 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdCRx. */
5515 uint16_t bmInterceptRdCRxExtra;
5516 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrCRx. */
5517 uint16_t bmInterceptWrCRxExtra;
5518 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdDRx. */
5519 uint16_t bmInterceptRdDRxExtra;
5520 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrDRx. */
5521 uint16_t bmInterceptWrDRxExtra;
5522 /** VM-exits to check (one bit per VM-exit). */
5523 uint32_t bmExitsToCheck[33];
5524} SVMRUNDBGSTATE;
5525AssertCompileMemberSize(SVMRUNDBGSTATE, bmExitsToCheck, (SVM_EXIT_MAX + 1 + 31) / 32 * 4);
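/* bmExitsToCheck holds one bit per possible exit code (0 .. SVM_EXIT_MAX), rounded up to
   whole 32-bit words; the compile-time assertion above keeps the 33-dword array in sync
   should SVM_EXIT_MAX ever change. */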
5526typedef SVMRUNDBGSTATE *PSVMRUNDBGSTATE;
5527
5528
5529/**
5530 * Initializes the SVMRUNDBGSTATE structure.
5531 *
5532 * @param pVCpu The cross context virtual CPU structure of the
5533 * calling EMT.
5534 * @param pSvmTransient The SVM-transient structure.
5535 * @param pDbgState The debug state to initialize.
5536 */
5537static void hmR0SvmRunDebugStateInit(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5538{
5539 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
5540 pDbgState->bmInterceptInitial = pVmcb->ctrl.u64InterceptCtrl;
5541 pDbgState->bmXcptInitial = pVmcb->ctrl.u32InterceptXcpt;
5542 pDbgState->bmInterceptRdCRxInitial = pVmcb->ctrl.u16InterceptRdCRx;
5543 pDbgState->bmInterceptWrCRxInitial = pVmcb->ctrl.u16InterceptWrCRx;
5544 pDbgState->bmInterceptRdDRxInitial = pVmcb->ctrl.u16InterceptRdDRx;
5545 pDbgState->bmInterceptWrDRxInitial = pVmcb->ctrl.u16InterceptWrDRx;
5546
5547 pDbgState->fModifiedInterceptCtrl = false;
5548 pDbgState->fModifiedXcptBitmap = false;
5549 pDbgState->fModifiedInterceptRdCRx = false;
5550 pDbgState->fModifiedInterceptWrCRx = false;
5551 pDbgState->fModifiedInterceptRdDRx = false;
5552 pDbgState->fModifiedInterceptWrDRx = false;
5553
5554 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
5555 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
5556
5557 /* We don't really need to zero these. */
5558 pDbgState->bmInterceptExtra = 0;
5559 pDbgState->bmXcptExtra = 0;
5560 pDbgState->bmInterceptRdCRxExtra = 0;
5561 pDbgState->bmInterceptWrCRxExtra = 0;
5562 pDbgState->bmInterceptRdDRxExtra = 0;
5563 pDbgState->bmInterceptWrDRxExtra = 0;
5564}
5565
5566
5567/**
5568 * Updates the VMCB fields with changes requested by @a pDbgState.
5569 *
5570 * This is performed after hmR0SvmPreRunGuestDebugStateUpdate as well as
5571 * immediately before executing guest code, i.e. when interrupts are disabled.
5572 * We don't check status codes here as we cannot easily assert or return in the
5573 * latter case.
5574 *
5575 * @param pSvmTransient The SVM-transient structure.
5576 * @param pDbgState The debug state.
5577 */
5578static void hmR0SvmPreRunGuestDebugStateApply(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5579{
5580 /*
5581     * Ensure desired flags in the VMCB control fields are set.
5582 */
5583 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
5584#define ADD_EXTRA_INTERCEPTS(a_VmcbCtrlField, a_bmExtra, a_fModified) do { \
5585 if ((pVmcb->ctrl. a_VmcbCtrlField & (a_bmExtra)) != (a_bmExtra)) \
5586 { \
5587 pVmcb->ctrl. a_VmcbCtrlField |= (a_bmExtra); \
5588 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5589 Log6Func((#a_VmcbCtrlField ": %#RX64\n", pVmcb->ctrl. a_VmcbCtrlField)); \
5590 (a_fModified) = true; \
5591 } \
5592 } while (0)
5593 ADD_EXTRA_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptExtra, pDbgState->fModifiedInterceptCtrl);
5594 ADD_EXTRA_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptExtra, pDbgState->fModifiedXcptBitmap);
5595 ADD_EXTRA_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxExtra, pDbgState->fModifiedInterceptRdCRx);
5596 ADD_EXTRA_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxExtra, pDbgState->fModifiedInterceptWrCRx);
5597 ADD_EXTRA_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxExtra, pDbgState->fModifiedInterceptRdDRx);
5598 ADD_EXTRA_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxExtra, pDbgState->fModifiedInterceptWrDRx);
5599#undef ADD_EXTRA_INTERCEPTS
5600}
5601
5602
5603/**
5604 * Restores VMCB fields that were changed by hmR0SvmPreRunGuestDebugStateApply
5605 * for re-entry next time around.
5606 *
5608 * @param pSvmTransient The SVM-transient structure.
5609 * @param pDbgState The debug state.
5610 */
5611static void hmR0SvmRunDebugStateRevert(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5612{
5613 /*
5614 * Restore VM-exit control settings as we may not reenter this function the
5615 * next time around.
5616 */
5617 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
5618
5619#define RESTORE_INTERCEPTS(a_VmcbCtrlField, a_bmInitial, a_fModified) do { \
5620 if ((a_fModified)) \
5621 { \
5622 pVmcb->ctrl. a_VmcbCtrlField = (a_bmInitial); \
5623 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5624 } \
5625 } while (0)
5626 RESTORE_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptInitial, pDbgState->fModifiedInterceptCtrl);
5627 RESTORE_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptInitial, pDbgState->fModifiedXcptBitmap);
5628 RESTORE_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxInitial, pDbgState->fModifiedInterceptRdCRx);
5629 RESTORE_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxInitial, pDbgState->fModifiedInterceptWrCRx);
5630 RESTORE_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxInitial, pDbgState->fModifiedInterceptRdDRx);
5631 RESTORE_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxInitial, pDbgState->fModifiedInterceptWrDRx);
5632#undef RESTORE_INTERCEPTS
5633}
5634
5635
5636/**
5637 * Configures VM-exit controls for current DBGF and DTrace settings.
5638 *
5639 * This updates @a pDbgState and the VMCB execution control fields (in the debug
5640 * state) to reflect the necessary VM-exits demanded by DBGF and DTrace.
5641 *
5642 * @param pVCpu The cross context virtual CPU structure.
5643 * @param pSvmTransient The SVM-transient structure. May update
5644 * fUpdatedTscOffsettingAndPreemptTimer.
5645 * @param pDbgState The debug state.
5646 */
5647static void hmR0SvmPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5648{
5649 /*
5650 * Take down the dtrace serial number so we can spot changes.
5651 */
5652 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
5653 ASMCompilerBarrier();
5654
5655 /*
5656 * Clear data members that we'll be rebuilding here.
5657 */
5658 pDbgState->bmXcptExtra = 0;
5659 pDbgState->bmInterceptExtra = 0;
5660 pDbgState->bmInterceptRdCRxExtra = 0;
5661 pDbgState->bmInterceptWrCRxExtra = 0;
5662 pDbgState->bmInterceptRdDRxExtra = 0;
5663 pDbgState->bmInterceptWrDRxExtra = 0;
5664 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
5665 pDbgState->bmExitsToCheck[i] = 0;
5666
5667 /*
5668 * Software interrupts (INT XXh)
5669 */
5670 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5671 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
5672 || VBOXVMM_INT_SOFTWARE_ENABLED())
5673 {
5674 pDbgState->bmInterceptExtra |= SVM_CTRL_INTERCEPT_INTN;
5675 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SWINT);
5676 }
5677
5678 /*
5679 * INT3 breakpoints - triggered by #BP exceptions.
5680 */
5681 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
5682 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
5683
5684 /*
5685 * Exception bitmap and XCPT events+probes.
5686 */
5687#define SET_XCPT(a_iXcpt) do { \
5688 pDbgState->bmXcptExtra |= RT_BIT_32(a_iXcpt); \
5689 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_XCPT_0 + (a_iXcpt)); \
5690 } while (0)
5691
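    /* Note: the loop below relies on the DBGF exception event types being laid out in
       x86 vector order starting at DBGFEVENT_XCPT_FIRST, so the loop index doubles as
       the exception vector handed to SET_XCPT. */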
5692 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
5693 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
5694 SET_XCPT(iXcpt);
5695
5696 if (VBOXVMM_XCPT_DE_ENABLED()) SET_XCPT(X86_XCPT_DE);
5697 if (VBOXVMM_XCPT_DB_ENABLED()) SET_XCPT(X86_XCPT_DB);
5698 if (VBOXVMM_XCPT_BP_ENABLED()) SET_XCPT(X86_XCPT_BP);
5699 if (VBOXVMM_XCPT_OF_ENABLED()) SET_XCPT(X86_XCPT_OF);
5700 if (VBOXVMM_XCPT_BR_ENABLED()) SET_XCPT(X86_XCPT_BR);
5701 if (VBOXVMM_XCPT_UD_ENABLED()) SET_XCPT(X86_XCPT_UD);
5702 if (VBOXVMM_XCPT_NM_ENABLED()) SET_XCPT(X86_XCPT_NM);
5703 if (VBOXVMM_XCPT_DF_ENABLED()) SET_XCPT(X86_XCPT_DF);
5704 if (VBOXVMM_XCPT_TS_ENABLED()) SET_XCPT(X86_XCPT_TS);
5705 if (VBOXVMM_XCPT_NP_ENABLED()) SET_XCPT(X86_XCPT_NP);
5706 if (VBOXVMM_XCPT_SS_ENABLED()) SET_XCPT(X86_XCPT_SS);
5707 if (VBOXVMM_XCPT_GP_ENABLED()) SET_XCPT(X86_XCPT_GP);
5708 if (VBOXVMM_XCPT_PF_ENABLED()) SET_XCPT(X86_XCPT_PF);
5709 if (VBOXVMM_XCPT_MF_ENABLED()) SET_XCPT(X86_XCPT_MF);
5710 if (VBOXVMM_XCPT_AC_ENABLED()) SET_XCPT(X86_XCPT_AC);
5711 if (VBOXVMM_XCPT_XF_ENABLED()) SET_XCPT(X86_XCPT_XF);
5712 if (VBOXVMM_XCPT_VE_ENABLED()) SET_XCPT(X86_XCPT_VE);
5713 if (VBOXVMM_XCPT_SX_ENABLED()) SET_XCPT(X86_XCPT_SX);
5714
5715#undef SET_XCPT
5716
5717 /*
5718 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
5719 *
5720 * Note! This is the reverse of what hmR0SvmHandleExitDtraceEvents does.
5721 * So, when adding/changing/removing please don't forget to update it.
5722 *
5723     * Some of the macros are picking up local variables to save horizontal space
5724     * (being able to see it in a table is the lesser evil here).
5725 */
5726#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
5727 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
5728 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
5729#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
5730 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5731 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5732 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5733 } else do { } while (0)
5734#define SET_INCP_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fInterceptCtrl) \
5735 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5736 { \
5737 (pDbgState)->bmInterceptExtra |= (a_fInterceptCtrl); \
5738 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5739 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5740 } else do { } while (0)
5741
5742 /** @todo double check these */
5743    /** @todo Check what more AMD-V specific stuff we can intercept. */
5744 //SET_INCP_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH, SVM_CTRL_INTERCEPT_TASK_SWITCH);
5745 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH);
5746 SET_INCP_XBM_IF_EITHER_EN(INSTR_VMM_CALL, SVM_EXIT_VMMCALL, SVM_CTRL_INTERCEPT_VMMCALL);
5747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, SVM_EXIT_VMMCALL);
5748 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMRUN, SVM_EXIT_VMRUN, SVM_CTRL_INTERCEPT_VMRUN);
5749 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMRUN, SVM_EXIT_VMRUN);
5750 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMLOAD, SVM_EXIT_VMLOAD, SVM_CTRL_INTERCEPT_VMLOAD);
5751 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMLOAD, SVM_EXIT_VMLOAD);
5752 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMSAVE, SVM_EXIT_VMSAVE, SVM_CTRL_INTERCEPT_VMSAVE);
5753 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMSAVE, SVM_EXIT_VMSAVE);
5754 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_STGI, SVM_EXIT_STGI, SVM_CTRL_INTERCEPT_STGI);
5755 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_STGI, SVM_EXIT_STGI);
5756 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_CLGI, SVM_EXIT_CLGI, SVM_CTRL_INTERCEPT_CLGI);
5757 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_CLGI, SVM_EXIT_CLGI);
5758
5759 SET_INCP_XBM_IF_EITHER_EN(INSTR_CPUID, SVM_EXIT_CPUID, SVM_CTRL_INTERCEPT_CPUID);
5760 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, SVM_EXIT_CPUID);
5761 SET_INCP_XBM_IF_EITHER_EN(INSTR_HALT, SVM_EXIT_HLT, SVM_CTRL_INTERCEPT_HLT);
5762 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, SVM_EXIT_HLT);
5763 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVD, SVM_EXIT_INVD, SVM_CTRL_INTERCEPT_INVD);
5764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, SVM_EXIT_INVD);
5765 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVLPG, SVM_EXIT_INVLPG, SVM_CTRL_INTERCEPT_INVLPG);
5766 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, SVM_EXIT_INVLPG);
5767 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDPMC, SVM_EXIT_RDPMC, SVM_CTRL_INTERCEPT_RDPMC);
5768 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, SVM_EXIT_RDPMC);
5769 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSC, SVM_EXIT_RDTSC, SVM_CTRL_INTERCEPT_RDTSC);
5770 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, SVM_EXIT_RDTSC);
5771 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSCP, SVM_EXIT_RDTSCP, SVM_CTRL_INTERCEPT_RDTSCP);
5772 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, SVM_EXIT_RDTSCP);
5773 SET_INCP_XBM_IF_EITHER_EN(INSTR_RSM, SVM_EXIT_RSM, SVM_CTRL_INTERCEPT_RSM);
5774 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, SVM_EXIT_RSM);
5775
5776 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
5777 pDbgState->bmInterceptRdCRxExtra = 0xffff;
5778 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_CRX_READ))
5779 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_CR0, SVM_EXIT_READ_CR15 + 1);
5780
5781 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
5782 pDbgState->bmInterceptWrCRxExtra = 0xffff;
5783 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
5784 {
5785 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_CR0, SVM_EXIT_WRITE_CR15 + 1);
5786 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_CR0_SEL_WRITE);
5787 }
5788
5789 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ))
5790 pDbgState->bmInterceptRdDRxExtra = 0xffff;
5791 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_DRX_READ))
5792 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_DR0, SVM_EXIT_READ_DR15 + 1);
5793
5794 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
5795 pDbgState->bmInterceptWrDRxExtra = 0xffff;
5796 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_DRX_WRITE))
5797 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_DR0, SVM_EXIT_WRITE_DR15 + 1);
5798
5799 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RDMSR, SVM_EXIT_MSR); /** @todo modify bitmap to intercept almost everything? (Clearing MSR_PROT just means no intercepts.) */
5800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, SVM_EXIT_MSR);
5801 SET_ONLY_XBM_IF_EITHER_EN(INSTR_WRMSR, SVM_EXIT_MSR); /** @todo ditto */
5802 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, SVM_EXIT_MSR);
5803 SET_INCP_XBM_IF_EITHER_EN(INSTR_MWAIT, SVM_EXIT_MWAIT, SVM_CTRL_INTERCEPT_MWAIT);
5804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, SVM_EXIT_MWAIT);
5805 if (ASMBitTest(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT))
5806 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT_ARMED);
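    /* Note: MWAIT executed while the monitor hardware is armed reports a separate exit
       code (SVM_EXIT_MWAIT_ARMED), hence the extra bit set above whenever plain MWAIT
       exits are being checked. */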
5807 SET_INCP_XBM_IF_EITHER_EN(INSTR_MONITOR, SVM_EXIT_MONITOR, SVM_CTRL_INTERCEPT_MONITOR);
5808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, SVM_EXIT_MONITOR);
5809 SET_INCP_XBM_IF_EITHER_EN(INSTR_PAUSE, SVM_EXIT_PAUSE, SVM_CTRL_INTERCEPT_PAUSE);
5810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, SVM_EXIT_PAUSE);
5811 SET_INCP_XBM_IF_EITHER_EN(INSTR_SIDT, SVM_EXIT_IDTR_READ, SVM_CTRL_INTERCEPT_IDTR_READS);
5812 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, SVM_EXIT_IDTR_READ);
5813 SET_INCP_XBM_IF_EITHER_EN(INSTR_LIDT, SVM_EXIT_IDTR_WRITE, SVM_CTRL_INTERCEPT_IDTR_WRITES);
5814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, SVM_EXIT_IDTR_WRITE);
5815 SET_INCP_XBM_IF_EITHER_EN(INSTR_SGDT, SVM_EXIT_GDTR_READ, SVM_CTRL_INTERCEPT_GDTR_READS);
5816 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, SVM_EXIT_GDTR_READ);
5817 SET_INCP_XBM_IF_EITHER_EN(INSTR_LGDT, SVM_EXIT_GDTR_WRITE, SVM_CTRL_INTERCEPT_GDTR_WRITES);
5818 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, SVM_EXIT_GDTR_WRITE);
5819 SET_INCP_XBM_IF_EITHER_EN(INSTR_SLDT, SVM_EXIT_LDTR_READ, SVM_CTRL_INTERCEPT_LDTR_READS);
5820 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, SVM_EXIT_LDTR_READ);
5821 SET_INCP_XBM_IF_EITHER_EN(INSTR_LLDT, SVM_EXIT_LDTR_WRITE, SVM_CTRL_INTERCEPT_LDTR_WRITES);
5822 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, SVM_EXIT_LDTR_WRITE);
5823 SET_INCP_XBM_IF_EITHER_EN(INSTR_STR, SVM_EXIT_TR_READ, SVM_CTRL_INTERCEPT_TR_READS);
5824 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, SVM_EXIT_TR_READ);
5825 SET_INCP_XBM_IF_EITHER_EN(INSTR_LTR, SVM_EXIT_TR_WRITE, SVM_CTRL_INTERCEPT_TR_WRITES);
5826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, SVM_EXIT_TR_WRITE);
5827 SET_INCP_XBM_IF_EITHER_EN(INSTR_WBINVD, SVM_EXIT_WBINVD, SVM_CTRL_INTERCEPT_WBINVD);
5828 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, SVM_EXIT_WBINVD);
5829 SET_INCP_XBM_IF_EITHER_EN(INSTR_XSETBV, SVM_EXIT_XSETBV, SVM_CTRL_INTERCEPT_XSETBV);
5830 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, SVM_EXIT_XSETBV);
5831
5832 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_TRIPLE_FAULT))
5833 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SHUTDOWN);
5834
5835#undef IS_EITHER_ENABLED
5836#undef SET_ONLY_XBM_IF_EITHER_EN
5837#undef SET_INCP_XBM_IF_EITHER_EN
5838
5839 /*
5840 * Sanitize the control stuff.
5841 */
5842 /** @todo filter out unsupported stuff? */
5843 if ( pVCpu->hmr0.s.fDebugWantRdTscExit
5844 != RT_BOOL(pDbgState->bmInterceptExtra & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
5845 {
5846 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
5847 /// @todo pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
5848 RT_NOREF(pSvmTransient);
5849 }
5850
5851 Log6(("HM: debug state: bmInterceptExtra=%#RX64 bmXcptExtra=%#RX32%s%s%s%s bmExitsToCheck=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
5852 pDbgState->bmInterceptExtra, pDbgState->bmXcptExtra,
5853 pDbgState->bmInterceptRdCRxExtra ? " rd-cr" : "",
5854 pDbgState->bmInterceptWrCRxExtra ? " wr-cr" : "",
5855 pDbgState->bmInterceptRdDRxExtra ? " rd-dr" : "",
5856 pDbgState->bmInterceptWrDRxExtra ? " wr-dr" : "",
5857 pDbgState->bmExitsToCheck[0],
5858 pDbgState->bmExitsToCheck[1],
5859 pDbgState->bmExitsToCheck[2],
5860 pDbgState->bmExitsToCheck[3],
5861 pDbgState->bmExitsToCheck[4]));
5862}
5863
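/*
 * A minimal sketch (for orientation, not compiled) of how the bmExitsToCheck bitmap built
 * above is consumed on the exit path; this mirrors what hmR0SvmDebugHandleExit() below
 * actually does, so all names used here exist in this file:
 */
#if 0
    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
    if (   uExitCode < sizeof(pDbgState->bmExitsToCheck) * 8U   /* exit code covered by the bitmap? */
        && ASMBitTest(pDbgState->bmExitsToCheck, uExitCode))    /* bit set by the function above? */
    {
        /* Translate the exit into DBGF/dtrace events and dispatch them. */
        VBOXSTRICTRC rcStrict = hmR0SvmHandleExitDtraceEvents(pVCpu, pSvmTransient, uExitCode);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;    /* The debugger wants control. */
    }
#endif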
5864
5865/**
5866 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
5867 * appropriate.
5868 *
5869 * The caller has checked the VM-exit against the SVMRUNDBGSTATE::bmExitsToCheck
5870 * bitmap.
5871 *
5872 * @returns Strict VBox status code (i.e. informational status codes too).
5873 * @param pVCpu The cross context virtual CPU structure.
5874 * @param pSvmTransient The SVM-transient structure.
5875 * @param uExitCode The VM-exit code.
5876 *
5877 * @remarks The name of this function is displayed by dtrace, so keep it short
5878 * and to the point. No longer than 33 chars long, please.
5879 */
5880static VBOXSTRICTRC hmR0SvmHandleExitDtraceEvents(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, uint64_t uExitCode)
5881{
5882 /*
5883 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
5884 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
5885 *
5886 * Note! This is the reverse operation of what hmR0SvmPreRunGuestDebugStateUpdate
5887 * does. Must add/change/remove both places. Same ordering, please.
5888 *
5889 * Added/removed events must also be reflected in the next section
5890 * where we dispatch dtrace events.
5891 */
5892 bool fDtrace1 = false;
5893 bool fDtrace2 = false;
5894 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
5895 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
5896 uint64_t uEventArg = 0;
5897#define SET_XCPT(a_XcptName) \
5898 do { \
5899 enmEvent2 = RT_CONCAT(DBGFEVENT_XCPT_, a_XcptName); \
5900 fDtrace2 = RT_CONCAT3(VBOXVMM_XCPT_, a_XcptName, _ENABLED)(); \
5901 } while (0)
5902#define SET_EXIT(a_EventSubName) \
5903 do { \
5904 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5905 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5906 } while (0)
5907#define SET_BOTH(a_EventSubName) \
5908 do { \
5909 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
5910 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5911 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
5912 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5913 } while (0)
5914 switch (uExitCode)
5915 {
5916 case SVM_EXIT_SWINT:
5917 enmEvent2 = DBGFEVENT_INTERRUPT_SOFTWARE;
5918 fDtrace2 = VBOXVMM_INT_SOFTWARE_ENABLED();
5919 uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1;
5920 break;
5921
5922 case SVM_EXIT_XCPT_DE: SET_XCPT(DE); break;
5923 case SVM_EXIT_XCPT_DB: SET_XCPT(DB); break;
5924 case SVM_EXIT_XCPT_BP: SET_XCPT(BP); break;
5925 case SVM_EXIT_XCPT_OF: SET_XCPT(OF); break;
5926 case SVM_EXIT_XCPT_BR: SET_XCPT(BR); break;
5927 case SVM_EXIT_XCPT_UD: SET_XCPT(UD); break;
5928 case SVM_EXIT_XCPT_NM: SET_XCPT(NM); break;
5929 case SVM_EXIT_XCPT_DF: SET_XCPT(DF); break;
5930 case SVM_EXIT_XCPT_TS: SET_XCPT(TS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5931 case SVM_EXIT_XCPT_NP: SET_XCPT(NP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5932 case SVM_EXIT_XCPT_SS: SET_XCPT(SS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5933 case SVM_EXIT_XCPT_GP: SET_XCPT(GP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5934 case SVM_EXIT_XCPT_PF: SET_XCPT(PF); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5935 case SVM_EXIT_XCPT_MF: SET_XCPT(MF); break;
5936 case SVM_EXIT_XCPT_AC: SET_XCPT(AC); break;
5937 case SVM_EXIT_XCPT_XF: SET_XCPT(XF); break;
5938 case SVM_EXIT_XCPT_VE: SET_XCPT(VE); break;
5939 case SVM_EXIT_XCPT_SX: SET_XCPT(SX); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5940
5941 case SVM_EXIT_XCPT_2: enmEvent2 = DBGFEVENT_XCPT_02; break;
5942 case SVM_EXIT_XCPT_9: enmEvent2 = DBGFEVENT_XCPT_09; break;
5943 case SVM_EXIT_XCPT_15: enmEvent2 = DBGFEVENT_XCPT_0f; break;
5944 case SVM_EXIT_XCPT_18: enmEvent2 = DBGFEVENT_XCPT_MC; break;
5945 case SVM_EXIT_XCPT_21: enmEvent2 = DBGFEVENT_XCPT_15; break;
5946 case SVM_EXIT_XCPT_22: enmEvent2 = DBGFEVENT_XCPT_16; break;
5947 case SVM_EXIT_XCPT_23: enmEvent2 = DBGFEVENT_XCPT_17; break;
5948 case SVM_EXIT_XCPT_24: enmEvent2 = DBGFEVENT_XCPT_18; break;
5949 case SVM_EXIT_XCPT_25: enmEvent2 = DBGFEVENT_XCPT_19; break;
5950 case SVM_EXIT_XCPT_26: enmEvent2 = DBGFEVENT_XCPT_1a; break;
5951 case SVM_EXIT_XCPT_27: enmEvent2 = DBGFEVENT_XCPT_1b; break;
5952 case SVM_EXIT_XCPT_28: enmEvent2 = DBGFEVENT_XCPT_1c; break;
5953 case SVM_EXIT_XCPT_29: enmEvent2 = DBGFEVENT_XCPT_1d; break;
5954 case SVM_EXIT_XCPT_31: enmEvent2 = DBGFEVENT_XCPT_1f; break;
5955
5956 case SVM_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
5957 case SVM_EXIT_VMMCALL: SET_BOTH(VMM_CALL); break;
5958 case SVM_EXIT_VMRUN: SET_BOTH(SVM_VMRUN); break;
5959 case SVM_EXIT_VMLOAD: SET_BOTH(SVM_VMLOAD); break;
5960 case SVM_EXIT_VMSAVE: SET_BOTH(SVM_VMSAVE); break;
5961 case SVM_EXIT_STGI: SET_BOTH(SVM_STGI); break;
5962 case SVM_EXIT_CLGI: SET_BOTH(SVM_CLGI); break;
5963 case SVM_EXIT_CPUID: SET_BOTH(CPUID); break;
5964 case SVM_EXIT_HLT: SET_BOTH(HALT); break;
5965 case SVM_EXIT_INVD: SET_BOTH(INVD); break;
5966 case SVM_EXIT_INVLPG: SET_BOTH(INVLPG); break;
5967 case SVM_EXIT_RDPMC: SET_BOTH(RDPMC); break;
5968 case SVM_EXIT_RDTSC: SET_BOTH(RDTSC); break;
5969 case SVM_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
5970 case SVM_EXIT_RSM: SET_BOTH(RSM); break;
5971
5972 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
5973 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
5974 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
5975 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
5976 SET_BOTH(CRX_READ);
5977 uEventArg = uExitCode - SVM_EXIT_READ_CR0;
5978 break;
5979 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
5980 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
5981 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
5982 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
5983 case SVM_EXIT_CR0_SEL_WRITE:
5984 SET_BOTH(CRX_WRITE);
5985 uEventArg = uExitCode - SVM_EXIT_WRITE_CR0;
5986 break;
5987 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5988 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
5989 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
5990 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5991 SET_BOTH(DRX_READ);
5992 uEventArg = uExitCode - SVM_EXIT_READ_DR0;
5993 break;
5994 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5995 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
5996 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
5997 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5998 SET_BOTH(DRX_WRITE);
5999 uEventArg = uExitCode - SVM_EXIT_WRITE_DR0;
6000 break;
6001 case SVM_EXIT_MSR:
6002 if (pSvmTransient->pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
6003 SET_BOTH(WRMSR);
6004 else
6005 SET_BOTH(RDMSR);
6006 break;
6007 case SVM_EXIT_MWAIT_ARMED:
6008 case SVM_EXIT_MWAIT: SET_BOTH(MWAIT); break;
6009 case SVM_EXIT_MONITOR: SET_BOTH(MONITOR); break;
6010 case SVM_EXIT_PAUSE: SET_BOTH(PAUSE); break;
6011 case SVM_EXIT_IDTR_READ: SET_BOTH(SIDT); break;
6012 case SVM_EXIT_IDTR_WRITE: SET_BOTH(LIDT); break;
6013 case SVM_EXIT_GDTR_READ: SET_BOTH(SGDT); break;
6014 case SVM_EXIT_GDTR_WRITE: SET_BOTH(LGDT); break;
6015 case SVM_EXIT_LDTR_READ: SET_BOTH(SLDT); break;
6016 case SVM_EXIT_LDTR_WRITE: SET_BOTH(LLDT); break;
6017 case SVM_EXIT_TR_READ: SET_BOTH(STR); break;
6018 case SVM_EXIT_TR_WRITE: SET_BOTH(LTR); break;
6019 case SVM_EXIT_WBINVD: SET_BOTH(WBINVD); break;
6020 case SVM_EXIT_XSETBV: SET_BOTH(XSETBV); break;
6021
6022 case SVM_EXIT_SHUTDOWN:
6023 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
6024 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
6025 break;
6026
6027 default:
6028 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6029 break;
6030 }
6031#undef SET_BOTH
6032#undef SET_EXIT
6033
6034 /*
6035 * Dtrace tracepoints go first. We do them all here at once so we don't
6036 * have to repeat the guest-state saving and related setup a few dozen times.
6037 * The downside is that we've got to repeat the switch, though this time
6038 * we use enmEvent since the probes are a subset of what DBGF does.
6039 */
6040 if (fDtrace1 || fDtrace2)
6041 {
6042 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6043 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6044 switch (enmEvent1)
6045 {
6046 /** @todo consider which extra parameters would be helpful for each probe. */
6047 case DBGFEVENT_END: break;
6048 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6049 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
6050 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
6051 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
6052 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
6053 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
6054 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
6055 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
6056 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
6057 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6058 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6059 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6060 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6061 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, (uint32_t)uEventArg, pCtx->cr2); break;
6062 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
6063 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
6064 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
6065 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
6066 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, (uint32_t)uEventArg); break;
6067 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6068 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
6069 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
6070 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
6071 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
6072 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
6073 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
6074 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6075 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6076 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6077 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6078 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6079 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
6080 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6081 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
6082 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
6083 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
6084 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
6085 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
6086 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
6087 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
6088 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
6089 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
6090 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
6091 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
6092 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
6093 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
6094 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
6095 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
6096 case DBGFEVENT_INSTR_SVM_VMRUN: VBOXVMM_INSTR_SVM_VMRUN(pVCpu, pCtx); break;
6097 case DBGFEVENT_INSTR_SVM_VMLOAD: VBOXVMM_INSTR_SVM_VMLOAD(pVCpu, pCtx); break;
6098 case DBGFEVENT_INSTR_SVM_VMSAVE: VBOXVMM_INSTR_SVM_VMSAVE(pVCpu, pCtx); break;
6099 case DBGFEVENT_INSTR_SVM_STGI: VBOXVMM_INSTR_SVM_STGI(pVCpu, pCtx); break;
6100 case DBGFEVENT_INSTR_SVM_CLGI: VBOXVMM_INSTR_SVM_CLGI(pVCpu, pCtx); break;
6101 default: AssertMsgFailed(("enmEvent1=%d uExitCode=%d\n", enmEvent1, uExitCode)); break;
6102 }
6103 switch (enmEvent2)
6104 {
6105 /** @todo consider which extra parameters would be helpful for each probe. */
6106 case DBGFEVENT_END: break;
6107 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
6108 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6109 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
6110 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
6111 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
6112 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
6113 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
6114 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
6115 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6116 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6117 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6118 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6119 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6120 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
6121 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6122 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
6123 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
6124 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
6125 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
6126 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
6127 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
6128 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
6129 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
6130 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
6131 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
6132 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
6133 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
6134 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
6135 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
6136 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
6137 case DBGFEVENT_EXIT_SVM_VMRUN: VBOXVMM_EXIT_SVM_VMRUN(pVCpu, pCtx); break;
6138 case DBGFEVENT_EXIT_SVM_VMLOAD: VBOXVMM_EXIT_SVM_VMLOAD(pVCpu, pCtx); break;
6139 case DBGFEVENT_EXIT_SVM_VMSAVE: VBOXVMM_EXIT_SVM_VMSAVE(pVCpu, pCtx); break;
6140 case DBGFEVENT_EXIT_SVM_STGI: VBOXVMM_EXIT_SVM_STGI(pVCpu, pCtx); break;
6141 case DBGFEVENT_EXIT_SVM_CLGI: VBOXVMM_EXIT_SVM_CLGI(pVCpu, pCtx); break;
6142 default: AssertMsgFailed(("enmEvent2=%d uExitCode=%d\n", enmEvent2, uExitCode)); break;
6143 }
6144 }
6145
6146 /*
6147 * Fire off the DBGF event, if enabled (our check here is just a quick one,
6148 * the DBGF call will do a full check).
6149 *
6150 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
6151 * Note! If we have two events, we prioritize the first, i.e. the instruction
6152 * one, in order to avoid event nesting.
6153 */
6154 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6155 VBOXSTRICTRC rcStrict;
6156 if ( enmEvent1 != DBGFEVENT_END
6157 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
6158 {
6159 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6160 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
6161 }
6162 else if ( enmEvent2 != DBGFEVENT_END
6163 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
6164 {
6165 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6166 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
6167 }
6168 else
6169 rcStrict = VINF_SUCCESS;
6170 return rcStrict;
6171}
6172
6173
6174/**
6175 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID),
6176 * debug variant.
6177 *
6178 * @returns Strict VBox status code (informational status codes included).
6179 * @param pVCpu The cross context virtual CPU structure.
6180 * @param pSvmTransient Pointer to the SVM transient structure.
6181 * @param pDbgState The runtime debug state.
6182 */
6183static VBOXSTRICTRC hmR0SvmDebugHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
6184{
6185 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
6186 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
6187
6188 /*
6189 * Expensive (saves context) generic dtrace VM-exit probe.
6190 */
6191 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
6192 if (!VBOXVMM_R0_HMSVM_VMEXIT_ENABLED())
6193 { /* more likely */ }
6194 else
6195 {
6196 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6197 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, uExitCode, pSvmTransient->pVmcb);
6198 }
6199
6200 /*
6201 * Check for single stepping event if we're stepping.
6202 */
6203 if (pVCpu->hm.s.fSingleInstruction)
6204 {
6205 switch (uExitCode)
6206 {
6207 /* Various events: */
6208 case SVM_EXIT_XCPT_0: case SVM_EXIT_XCPT_1: case SVM_EXIT_XCPT_2: case SVM_EXIT_XCPT_3:
6209 case SVM_EXIT_XCPT_4: case SVM_EXIT_XCPT_5: case SVM_EXIT_XCPT_6: case SVM_EXIT_XCPT_7:
6210 case SVM_EXIT_XCPT_8: case SVM_EXIT_XCPT_9: case SVM_EXIT_XCPT_10: case SVM_EXIT_XCPT_11:
6211 case SVM_EXIT_XCPT_12: case SVM_EXIT_XCPT_13: case SVM_EXIT_XCPT_14: case SVM_EXIT_XCPT_15:
6212 case SVM_EXIT_XCPT_16: case SVM_EXIT_XCPT_17: case SVM_EXIT_XCPT_18: case SVM_EXIT_XCPT_19:
6213 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
6214 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
6215 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
6216 case SVM_EXIT_INTR:
6217 case SVM_EXIT_NMI:
6218 case SVM_EXIT_VINTR:
6219 case SVM_EXIT_NPF:
6220 case SVM_EXIT_AVIC_NOACCEL:
6221
6222 /* Instruction specific VM-exits: */
6223 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
6224 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
6225 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
6226 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
6227 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
6228 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
6229 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
6230 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
6231 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
6232 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
6233 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
6234 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
6235 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
6236 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
6237 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
6238 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
6239 case SVM_EXIT_CR0_SEL_WRITE:
6240 case SVM_EXIT_IDTR_READ:
6241 case SVM_EXIT_GDTR_READ:
6242 case SVM_EXIT_LDTR_READ:
6243 case SVM_EXIT_TR_READ:
6244 case SVM_EXIT_IDTR_WRITE:
6245 case SVM_EXIT_GDTR_WRITE:
6246 case SVM_EXIT_LDTR_WRITE:
6247 case SVM_EXIT_TR_WRITE:
6248 case SVM_EXIT_RDTSC:
6249 case SVM_EXIT_RDPMC:
6250 case SVM_EXIT_PUSHF:
6251 case SVM_EXIT_POPF:
6252 case SVM_EXIT_CPUID:
6253 case SVM_EXIT_RSM:
6254 case SVM_EXIT_IRET:
6255 case SVM_EXIT_SWINT:
6256 case SVM_EXIT_INVD:
6257 case SVM_EXIT_PAUSE:
6258 case SVM_EXIT_HLT:
6259 case SVM_EXIT_INVLPG:
6260 case SVM_EXIT_INVLPGA:
6261 case SVM_EXIT_IOIO:
6262 case SVM_EXIT_MSR:
6263 case SVM_EXIT_TASK_SWITCH:
6264 case SVM_EXIT_VMRUN:
6265 case SVM_EXIT_VMMCALL:
6266 case SVM_EXIT_VMLOAD:
6267 case SVM_EXIT_VMSAVE:
6268 case SVM_EXIT_STGI:
6269 case SVM_EXIT_CLGI:
6270 case SVM_EXIT_SKINIT:
6271 case SVM_EXIT_RDTSCP:
6272 case SVM_EXIT_ICEBP:
6273 case SVM_EXIT_WBINVD:
6274 case SVM_EXIT_MONITOR:
6275 case SVM_EXIT_MWAIT:
6276 case SVM_EXIT_MWAIT_ARMED:
6277 case SVM_EXIT_XSETBV:
6278 case SVM_EXIT_RDPRU:
6279 case SVM_EXIT_WRITE_EFER_TRAP:
6280 case SVM_EXIT_WRITE_CR0_TRAP: case SVM_EXIT_WRITE_CR1_TRAP: case SVM_EXIT_WRITE_CR2_TRAP: case SVM_EXIT_WRITE_CR3_TRAP:
6281 case SVM_EXIT_WRITE_CR4_TRAP: case SVM_EXIT_WRITE_CR5_TRAP: case SVM_EXIT_WRITE_CR6_TRAP: case SVM_EXIT_WRITE_CR7_TRAP:
6282 case SVM_EXIT_WRITE_CR8_TRAP: case SVM_EXIT_WRITE_CR9_TRAP: case SVM_EXIT_WRITE_CR10_TRAP: case SVM_EXIT_WRITE_CR11_TRAP:
6283 case SVM_EXIT_WRITE_CR12_TRAP: case SVM_EXIT_WRITE_CR13_TRAP: case SVM_EXIT_WRITE_CR14_TRAP: case SVM_EXIT_WRITE_CR15_TRAP:
6284 case SVM_EXIT_MCOMMIT:
6285 {
6286 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6287 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
6288 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
6289 {
6290 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6291 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode));
6292 return VINF_EM_DBG_STEPPED;
6293 }
6294 break;
6295 }
6296
6297 /* Errors and unexpected events: */
6298 case SVM_EXIT_FERR_FREEZE:
6299 case SVM_EXIT_SHUTDOWN:
6300 case SVM_EXIT_AVIC_INCOMPLETE_IPI:
6301 break;
6302
6303 case SVM_EXIT_SMI:
6304 case SVM_EXIT_INIT:
6305 default:
6306 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6307 break;
6308 }
6309 }
6310
6311 /*
6312 * Check for debugger event breakpoints and dtrace probes.
6313 */
6314 if ( uExitCode < sizeof(pDbgState->bmExitsToCheck) * 8U
6315 && ASMBitTest(pDbgState->bmExitsToCheck, uExitCode) )
6316 {
6317 VBOXSTRICTRC rcStrict = hmR0SvmHandleExitDtraceEvents(pVCpu, pSvmTransient, uExitCode);
6318 if (rcStrict != VINF_SUCCESS)
6319 {
6320 Log6Func(("%04x:%08RX64 (exit %u) -> %Rrc\n",
6321 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, VBOXSTRICTRC_VAL(rcStrict) ));
6322 return rcStrict;
6323 }
6324 }
6325
6326 /*
6327 * Normal processing.
6328 */
6329 return hmR0SvmHandleExit(pVCpu, pSvmTransient);
6330}
6331
6332
6333/**
6334 * Runs the guest code using AMD-V in single step mode.
6335 *
6336 * @returns Strict VBox status code.
6337 * @param pVCpu The cross context virtual CPU structure.
6338 * @param pcLoops Pointer to the number of executed loops.
6339 */
6340static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
6341{
6342 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
6343 Assert(pcLoops);
6344 Assert(*pcLoops <= cMaxResumeLoops);
6345
6346 SVMTRANSIENT SvmTransient;
6347 RT_ZERO(SvmTransient);
6348 SvmTransient.fUpdateTscOffsetting = true;
6349 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
6350
6351 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6352
6353 /* Set HMCPU indicators. */
6354 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
6355 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
6356 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6357 pVCpu->hmr0.s.fUsingDebugLoop = true;
6358
6359 /* State we keep to help modify and later restore the VMCB fields we alter, and for detecting steps. */
6360 SVMRUNDBGSTATE DbgState;
6361 hmR0SvmRunDebugStateInit(pVCpu, &SvmTransient, &DbgState);
6362 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6363
6364 /*
6365 * The loop.
6366 */
6367 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
6368 for (;;)
6369 {
6370 Assert(!HMR0SuspendPending());
6371 AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
6372 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
6373 (unsigned)RTMpCpuId(), *pcLoops));
6374 bool fStepping = pVCpu->hm.s.fSingleInstruction;
6375
6376 /* Set up VM-execution controls the next two can respond to. */
6377 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6378
6379 /* Preparatory work for running guest code; this may force us to return to
6380 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
6381 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6382 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
6383 if (rc != VINF_SUCCESS)
6384 break;
6385
6386 /*
6387 * No longjmps to ring-3 from this point on!!!
6388 *
6389 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
6390 * better than a kernel panic. This also disables flushing of the R0-logger instance.
6391 */
6392 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
6393
6394 /* Override any obnoxious code in the above two calls. */
6395 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6396#if 0
6397 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64\n",
6398 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6399 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u));
6400#endif
6401
6402 /*
6403 * Finally execute guest code.
6404 */
6405 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
6406
6407 /* Restore any residual host-state and save any bits shared between host and guest
6408 into the guest-CPU state. Re-enables interrupts! */
6409 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
6410#if 0
6411 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64 exit=%d\n",
6412 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6413 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u, SvmTransient.u64ExitCode));
6414#endif
6415
6416 if (RT_LIKELY( rc == VINF_SUCCESS /* Check for VMRUN errors. */
6417 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
6418 { /* very likely */ }
6419 else
6420 {
6421 if (rc == VINF_SUCCESS)
6422 rc = VERR_SVM_INVALID_GUEST_STATE;
6423 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
6424 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
6425 return rc;
6426 }
6427
6428 /* Handle the #VMEXIT. */
6429 HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
6430 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
6431 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
6432 rc = hmR0SvmDebugHandleExit(pVCpu, &SvmTransient, &DbgState);
6433 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
6434 if (rc != VINF_SUCCESS)
6435 break;
6436 if (++(*pcLoops) >= cMaxResumeLoops)
6437 {
6438 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
6439 rc = VINF_EM_RAW_INTERRUPT;
6440 break;
6441 }
6442
6443 /*
6444 * Stepping: Did the RIP change? If so, consider it a single step.
6445 * Otherwise, make sure one of the TFs gets set.
6446 */
6447 if (fStepping)
6448 {
6449 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6450 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
6451 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
6452 {
6453 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6454 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, SvmTransient.u64ExitCode));
6455 rc = VINF_EM_DBG_STEPPED;
6456 break;
6457 }
6458 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
6459 }
6460
6461 /*
6462 * Update when the dtrace settings change (DBGF kicks us, so no need to check).
6463 * Revert the state changes afterwards so we can drop intercepts no longer needed.
6464 */
6465 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
6466 {
6467 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6468 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6469 }
6470 }
6471
6472 /*
6473 * Clear the X86_EFL_TF if necessary.
6474 */
6475 if (pVCpu->hmr0.s.fClearTrapFlag)
6476 {
6477 pVCpu->hmr0.s.fClearTrapFlag = false;
6478 pCtx->eflags.Bits.u1TF = 0;
6479 }
6480
6481 /* Restore HMCPU indicators. */
6482 pVCpu->hmr0.s.fUsingDebugLoop = false;
6483 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6484 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
6485
6486 /* Restore all controls applied by hmR0SvmPreRunGuestDebugStateApply above. */
6487 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6488
6489 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6490 return rc;
6491}
6492
6493/** @} */
6494
6495#undef VMEXIT_CALL_RET
6496
6497
6498#ifdef VBOX_STRICT
6499/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6500# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
6501 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6502
6503# define HMSVM_ASSERT_PREEMPT_CPUID() \
6504 do \
6505 { \
6506 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6507 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6508 } while (0)
6509
6510# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6511 do { \
6512 AssertPtr((a_pVCpu)); \
6513 AssertPtr((a_pSvmTransient)); \
6514 Assert(ASMIntAreEnabled()); \
6515 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6516 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
6517 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
6518 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6519 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6520 HMSVM_ASSERT_PREEMPT_CPUID(); \
6521 } while (0)
6522#else
6523# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6524 do { \
6525 RT_NOREF2(a_pVCpu, a_pSvmTransient); \
6526 } while (0)
6527#endif
6528
6529
6530/**
6531 * Gets the IEM exception flags for the specified SVM event.
6532 *
6533 * @returns The IEM exception flags.
6534 * @param pEvent Pointer to the SVM event.
6535 *
6536 * @remarks This function currently only constructs flags required for
6537 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
6538 * and CR2 aspects of an exception are not included).
6539 */
6540static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
6541{
6542 uint8_t const uEventType = pEvent->n.u3Type;
6543 uint32_t fIemXcptFlags;
6544 switch (uEventType)
6545 {
6546 case SVM_EVENT_EXCEPTION:
6547 /*
6548 * Only INT3 and INTO instructions can raise #BP and #OF exceptions.
6549 * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
6550 */
6551 if (pEvent->n.u8Vector == X86_XCPT_BP)
6552 {
6553 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
6554 break;
6555 }
6556 if (pEvent->n.u8Vector == X86_XCPT_OF)
6557 {
6558 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
6559 break;
6560 }
6561 /** @todo How do we distinguish ICEBP \#DB from the regular one? */
6562 RT_FALL_THRU();
6563 case SVM_EVENT_NMI:
6564 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6565 break;
6566
6567 case SVM_EVENT_EXTERNAL_IRQ:
6568 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6569 break;
6570
6571 case SVM_EVENT_SOFTWARE_INT:
6572 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6573 break;
6574
6575 default:
6576 fIemXcptFlags = 0;
6577 AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
6578 break;
6579 }
6580 return fIemXcptFlags;
6581}
6582
6583
6584/**
6585 * Handle a condition that occurred while delivering an event through the guest
6586 * IDT.
6587 *
6588 * @returns VBox status code (informational error codes included).
6589 * @retval VINF_SUCCESS if we should continue handling the \#VMEXIT.
6590 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
6591 * continue execution of the guest, which will deliver the \#DF.
6592 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6593 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6594 *
6595 * @param pVCpu The cross context virtual CPU structure.
6596 * @param pSvmTransient Pointer to the SVM transient structure.
6597 *
6598 * @remarks No-long-jump zone!!!
6599 */
6600static int hmR0SvmCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6601{
6602 /** @todo r=bird: Looks like this is called on many exits and we start by
6603 * loading CR2 on the offchance that we actually have work to do here.
6604 *
6605 * HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY can surely check
6606 * pVmcb->ctrl.ExitIntInfo.n.u1Valid, can't it?
6607 *
6608 * Also, what's the deal with hmR0SvmGetCurrentVmcb() vs pSvmTransient->pVmcb?
6609 */
6610 int rc = VINF_SUCCESS;
6611 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6612 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
6613
6614 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
6615 pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
6616 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));
6617
6618 /*
6619 * The EXITINTINFO (if valid) contains the prior exception (IDT vector) that was trying to
6620 * be delivered to the guest which caused a #VMEXIT which was intercepted (Exit vector).
6621 *
6622 * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code".
6623 */
6624 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
6625 {
6626 IEMXCPTRAISE enmRaise;
6627 IEMXCPTRAISEINFO fRaiseInfo;
6628 bool const fExitIsHwXcpt = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31 - SVM_EXIT_XCPT_0;
6629 uint8_t const uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
6630 if (fExitIsHwXcpt)
6631 {
6632 uint8_t const uExitVector = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0;
6633 uint32_t const fIdtVectorFlags = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
6634 uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6635 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6636 }
6637 else
6638 {
6639 /*
6640 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF)
6641 * then we end up here.
6642 *
6643 * If the event was:
6644 * - a software interrupt, we can re-execute the instruction which will
6645 * regenerate the event.
6646 * - an NMI, we need to clear NMI blocking and re-inject the NMI.
6647 * - a hardware exception or external interrupt, we re-inject it.
6648 */
6649 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6650 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
6651 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6652 else
6653 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6654 }
6655
6656 switch (enmRaise)
6657 {
6658 case IEMXCPTRAISE_CURRENT_XCPT:
6659 case IEMXCPTRAISE_PREV_EVENT:
6660 {
6661 /* For software interrupts, we shall re-execute the instruction. */
6662 if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
6663 {
6664 RTGCUINTPTR GCPtrFaultAddress = 0;
6665
6666 /* If we are re-injecting an NMI, clear NMI blocking. */
6667 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
6668 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6669
6670 /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
6671 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6672 {
6673 pSvmTransient->fVectoringPF = true;
6674 Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
6675 pVCpu->cpum.GstCtx.cr2));
6676 }
6677 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
6678 && uIdtVector == X86_XCPT_PF)
6679 {
6680 /*
6681 * If the previous exception was a #PF, we need to recover the CR2 value.
6682 * This can't happen with shadow paging.
6683 */
6684 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
6685 }
6686
6687 /*
6688 * Without nested paging, when uExitVector is #PF, CR2 value will be updated from the VMCB's
6689 * exit info. fields, if it's a guest #PF, see hmR0SvmExitXcptPF().
6690 */
6691 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
6692 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
6693 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);
6694
6695 Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
6696 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
6697 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
6698 }
6699 break;
6700 }
6701
6702 case IEMXCPTRAISE_REEXEC_INSTR:
6703 {
6704 Assert(rc == VINF_SUCCESS);
6705 break;
6706 }
6707
6708 case IEMXCPTRAISE_DOUBLE_FAULT:
6709 {
6710 /*
6711 * Determine a vectoring double #PF condition. Used later, when PGM evaluates
6712 * the second #PF as a guest #PF (and not a shadow #PF), in which case it needs
6713 * to be converted into a #DF.
6714 */
6715 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6716 {
6717 Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
6718 pSvmTransient->fVectoringDoublePF = true;
6719 Assert(rc == VINF_SUCCESS);
6720 }
6721 else
6722 {
6723 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
6724 hmR0SvmSetPendingXcptDF(pVCpu);
6725 rc = VINF_HM_DOUBLE_FAULT;
6726 }
6727 break;
6728 }
6729
6730 case IEMXCPTRAISE_TRIPLE_FAULT:
6731 {
6732 rc = VINF_EM_RESET;
6733 break;
6734 }
6735
6736 case IEMXCPTRAISE_CPU_HANG:
6737 {
6738 rc = VERR_EM_GUEST_CPU_HANG;
6739 break;
6740 }
6741
6742 default:
6743 AssertMsgFailedBreakStmt(("Bogus enmRaise value: %d (%#x)\n", enmRaise, enmRaise), rc = VERR_SVM_IPE_2);
6744 }
6745 }
6746 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
6747 return rc;
6748}
6749
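/*
 * Worked example (a sketch of one path through the function above, assuming the guest
 * takes a #PF while a #PF is being delivered - the classic double-fault case):
 *
 *   - EXITINTINFO holds the original #PF (type SVM_EVENT_EXCEPTION, vector 14), so
 *     hmR0SvmGetIemXcptFlags() returns IEM_XCPT_FLAGS_T_CPU_XCPT.
 *   - The exit code is SVM_EXIT_XCPT_PF, so IEMEvaluateRecursiveXcpt() yields
 *     IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF set.
 *   - Because IEMXCPTRAISEINFO_PF_PF is set, we only mark pSvmTransient->fVectoringDoublePF
 *     and let PGM decide later; in other double-fault cases a #DF is queued and
 *     VINF_HM_DOUBLE_FAULT is returned.
 */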
6750
6751/**
6752 * Advances the guest RIP by the number of bytes specified in @a cb.
6753 *
6754 * @param pVCpu The cross context virtual CPU structure.
6755 * @param cb RIP increment value in bytes.
6756 */
6757DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
6758{
6759 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6760 pCtx->rip += cb;
6761
6762 /* Update interrupt shadow. */
6763 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6764 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
6765 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6766}
6767
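/*
 * Minimal usage sketch (hypothetical caller, not compiled): an exit handler that knows the
 * length of the intercepted instruction, e.g. from the VMCB next-RIP field on CPUs with
 * NRIP-save support, would advance RIP and drop a stale interrupt shadow like so:
 */
#if 0
    PCSVMVMCB     pVmcb   = hmR0SvmGetCurrentVmcb(pVCpu);
    uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; /* length of the intercepted instruction */
    hmR0SvmAdvanceRip(pVCpu, cbInstr);
#endif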
6768
6769/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6770/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6771/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6772
6773/** @name \#VMEXIT handlers.
6774 * @{
6775 */
6776
6777/**
6778 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
6779 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
6780 */
6781HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6782{
6783 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6784
6785 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
6786 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
6787 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
6788 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6789
6790 /*
6791 * AMD-V has no preemption timer, and the generic periodic preemption timer has no way
6792 * of telling -before- the timer fires whether the current interrupt is our own timer or
6793 * some other host interrupt. We also cannot examine what interrupt it is until the host
6794 * actually takes the interrupt.
6795 *
6796 * Going back to executing guest code here unconditionally causes random scheduling
6797 * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host).
6798 */
6799 return VINF_EM_RAW_INTERRUPT;
6800}
6801
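/*
 * A note on the pattern shared by most of the instruction-intercept handlers below: when
 * the CPU supports the NRIP-save feature (checked via hmR0SvmSupportsNextRipSave), the
 * VMCB's u64NextRIP field holds the address of the instruction following the intercepted
 * one, so the instruction length is simply u64NextRIP - RIP and the pre-decoded IEM
 * helpers (IEMExecDecodedXxx) can be used. Without it, we fall back to IEMExecOne(),
 * which fetches and decodes the instruction itself at the cost of importing more guest
 * state.
 */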
6802
6803/**
6804 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
6805 */
6806HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6807{
6808 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6809
6810 VBOXSTRICTRC rcStrict;
6811 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6812 if (fSupportsNextRipSave)
6813 {
6814 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6815 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6816 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6817 rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr);
6818 }
6819 else
6820 {
6821 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6822 rcStrict = IEMExecOne(pVCpu);
6823 }
6824
6825 if (rcStrict == VINF_IEM_RAISED_XCPT)
6826 {
6827 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6828 rcStrict = VINF_SUCCESS;
6829 }
6830 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6831 return rcStrict;
6832}
6833
6834
6835/**
6836 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
6837 */
6838HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6839{
6840 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6841
6842 VBOXSTRICTRC rcStrict;
6843 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6844 if (fSupportsNextRipSave)
6845 {
6846 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6847 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6848 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6849 rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr);
6850 }
6851 else
6852 {
6853 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6854 rcStrict = IEMExecOne(pVCpu);
6855 }
6856
6857 if (rcStrict == VINF_IEM_RAISED_XCPT)
6858 {
6859 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6860 rcStrict = VINF_SUCCESS;
6861 }
6862 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6863 return rcStrict;
6864}
6865
6866
6867/**
6868 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
6869 */
6870HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6871{
6872 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6873
6874 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
6875 VBOXSTRICTRC rcStrict;
6876 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
6877 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
6878 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
6879 if (!pExitRec)
6880 {
6881 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6882 if (fSupportsNextRipSave)
6883 {
6884 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6885 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6886 rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
6887 }
6888 else
6889 {
6890 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6891 rcStrict = IEMExecOne(pVCpu);
6892 }
6893
6894 if (rcStrict == VINF_IEM_RAISED_XCPT)
6895 {
6896 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6897 rcStrict = VINF_SUCCESS;
6898 }
6899 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6900 }
6901 else
6902 {
6903 /*
6904 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
6905 */
6906 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6907
6908 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
6909 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
6910
6911 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
6912
6913 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
6914 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6915 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6916 }
6917 return rcStrict;
6918}
6919
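/*
 * Rough sketch of the exit-history optimization used above: EMHistoryUpdateFlagsAndTypeAndPC()
 * records the exit and only returns a non-NULL exit record once the same kind of exit keeps
 * recurring at the same guest PC. In that case the full guest state is imported and
 * EMHistoryExec() takes over, executing a burst of instructions around the hot spot instead
 * of bouncing in and out of the VM for every single CPUID (this is an interpretation of the
 * EM API as used here, not a statement of its full contract).
 */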
6920
6921/**
6922 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
6923 */
6924HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6925{
6926 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6927
6928 VBOXSTRICTRC rcStrict;
6929 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6930 if (fSupportsNextRipSave)
6931 {
6932 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
6933 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6934 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6935 rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
6936 }
6937 else
6938 {
6939 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6940 rcStrict = IEMExecOne(pVCpu);
6941 }
6942
6943 if (rcStrict == VINF_SUCCESS)
6944 pSvmTransient->fUpdateTscOffsetting = true;
6945 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6946 {
6947 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6948 rcStrict = VINF_SUCCESS;
6949 }
6950 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6951 return rcStrict;
6952}
6953
6954
6955/**
6956 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
6957 */
6958HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6959{
6960 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6961
6962 VBOXSTRICTRC rcStrict;
6963 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6964 if (fSupportsNextRipSave)
6965 {
6966 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
6967 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6968 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6969 rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr);
6970 }
6971 else
6972 {
6973 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6974 rcStrict = IEMExecOne(pVCpu);
6975 }
6976
6977 if (rcStrict == VINF_SUCCESS)
6978 pSvmTransient->fUpdateTscOffsetting = true;
6979 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6980 {
6981 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6982 rcStrict = VINF_SUCCESS;
6983 }
6984 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6985 return rcStrict;
6986}
6987
6988
6989/**
6990 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
6991 */
6992HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6993{
6994 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6995
6996 VBOXSTRICTRC rcStrict;
6997 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6998 if (fSupportsNextRipSave)
6999 {
7000 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
7001 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7002 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7003 rcStrict = IEMExecDecodedRdpmc(pVCpu, cbInstr);
7004 }
7005 else
7006 {
7007 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7008 rcStrict = IEMExecOne(pVCpu);
7009 }
7010
7011 if (rcStrict == VINF_IEM_RAISED_XCPT)
7012 {
7013 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7014 rcStrict = VINF_SUCCESS;
7015 }
7016 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7017 return rcStrict;
7018}
7019
7020
7021/**
7022 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
7023 */
7024HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7025{
7026 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7027 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
7028
7029 VBOXSTRICTRC rcStrict;
7030 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7031 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7032 if ( fSupportsDecodeAssists
7033 && fSupportsNextRipSave)
7034 {
7035 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7036 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7037 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7038 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
7039 rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
7040 }
7041 else
7042 {
7043 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7044 rcStrict = IEMExecOne(pVCpu);
7045 }
7046
7047 if (rcStrict == VINF_IEM_RAISED_XCPT)
7048 {
7049 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7050 rcStrict = VINF_SUCCESS;
7051 }
7052 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7053 return VBOXSTRICTRC_VAL(rcStrict);
7054}
7055
7056
7057/**
7058 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
7059 */
7060HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7061{
7062 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7063
7064 VBOXSTRICTRC rcStrict;
7065 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7066 if (fSupportsNextRipSave)
7067 {
7068 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7069 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7070 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7071 rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
7072 }
7073 else
7074 {
7075 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7076 rcStrict = IEMExecOne(pVCpu);
7077 }
7078
7079 if ( rcStrict == VINF_EM_HALT
7080 || rcStrict == VINF_SUCCESS)
7081 rcStrict = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT;
7082 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7083 {
7084 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7085 rcStrict = VINF_SUCCESS;
7086 }
7087 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7088 if (rcStrict != VINF_SUCCESS)
7089 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
7090 return VBOXSTRICTRC_VAL(rcStrict);
7091}
7092
7093
7094/**
7095 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
7096 */
7097HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7098{
7099 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7100
7101 /*
7102 * If the instruction length supplied by the CPU is 3 bytes, we can be certain that no
7103 * segment override prefix is present (the un-prefixed MONITOR encoding, 0F 01 C8, is
7104 * exactly 3 bytes) and thus the default segment DS is used. Otherwise, a segment override
7105 * or other prefixes might be present, in which case we fall back to IEMExecOne() to figure it out.
7106 */
7107 VBOXSTRICTRC rcStrict;
7108 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7109 uint8_t const cbInstr = hmR0SvmSupportsNextRipSave(pVCpu) ? pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip : 0;
7110 if (cbInstr)
7111 {
7112 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7113 rcStrict = IEMExecDecodedMonitor(pVCpu, cbInstr);
7114 }
7115 else
7116 {
7117 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7118 rcStrict = IEMExecOne(pVCpu);
7119 }
7120
7121 if (rcStrict == VINF_IEM_RAISED_XCPT)
7122 {
7123 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7124 rcStrict = VINF_SUCCESS;
7125 }
7126 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7127 return rcStrict;
7128}
7129
7130
7131/**
7132 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
7133 */
7134HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7135{
7136 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7137
7138 VBOXSTRICTRC rcStrict;
7139 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7140 if (fSupportsNextRipSave)
7141 {
7142 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7143 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7144 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7145 rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr);
7146 }
7147 else
7148 {
7149 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7150 rcStrict = IEMExecOne(pVCpu);
7151 }
7152
7153 if ( rcStrict == VINF_EM_HALT
7154 && EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7155 rcStrict = VINF_SUCCESS;
7156 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7157 {
7158 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7159 rcStrict = VINF_SUCCESS;
7160 }
7161 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7162 return rcStrict;
7163}
7164
7165
7166/**
7167 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
7168 * \#VMEXIT.
7169 */
7170HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7171{
7172 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7173 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7174 return VINF_EM_RESET;
7175}
7176
7177
7178/**
7179 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
7180 */
7181HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7182{
7183 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7184 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7185 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
7186 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
7187 RT_NOREF(pVmcb);
7188 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7189 return VERR_SVM_UNEXPECTED_EXIT;
7190}
7191
7192
7193/**
7194 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
7195 */
7196HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7197{
7198 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7199
7200 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7201 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7202#ifdef VBOX_WITH_STATISTICS
7203 switch (pSvmTransient->u64ExitCode)
7204 {
7205 case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
7206 case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
7207 case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
7208 case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
7209 case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
7210 }
7211#endif
7212
7213 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7214 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7215 if ( fSupportsDecodeAssists
7216 && fSupportsNextRipSave)
7217 {
7218 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7219 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7220 if (fMovCRx)
7221 {
7222 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR_MASK
7223 | CPUMCTX_EXTRN_APIC_TPR);
7224 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
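                /* The CRx-read exit codes are consecutive (SVM_EXIT_READ_CR0 .. CR15), so the offset from the base is the CR number. */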
7225 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
7226 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7227 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
7228 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7229 return VBOXSTRICTRC_VAL(rcStrict);
7230 }
7231 /* else: SMSW instruction, fall back below to IEM for this. */
7232 }
7233
7234 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7235 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7236 AssertMsg( rcStrict == VINF_SUCCESS
7237 || rcStrict == VINF_PGM_SYNC_CR3
7238 || rcStrict == VINF_IEM_RAISED_XCPT,
7239 ("hmR0SvmExitReadCRx: IEMExecOne failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7240 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
7241 if (rcStrict == VINF_IEM_RAISED_XCPT)
7242 {
7243 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7244 rcStrict = VINF_SUCCESS;
7245 }
7246 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7247 return rcStrict;
7248}
7249
7250
7251/**
7252 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
7253 */
7254HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7255{
7256 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7257
7258 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
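        /* SVM_EXIT_CR0_SEL_WRITE is the separate exit code for the selective CR0-write intercept; the other write exit codes are consecutive from SVM_EXIT_WRITE_CR0. */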
7259 uint8_t const iCrReg = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
7260 Assert(iCrReg <= 15);
7261
7262 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
7263 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7264 bool fDecodedInstr = false;
7265 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7266 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7267 if ( fSupportsDecodeAssists
7268 && fSupportsNextRipSave)
7269 {
7270 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7271 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7272 if (fMovCRx)
7273 {
7274 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
7275 | CPUMCTX_EXTRN_APIC_TPR);
7276 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
7277 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7278 Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
7279 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
7280 fDecodedInstr = true;
7281 }
7282 /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
7283 }
7284
7285 if (!fDecodedInstr)
7286 {
7287 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7288 Log4Func(("iCrReg=%#x\n", iCrReg));
7289 rcStrict = IEMExecOne(pVCpu);
7290 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
7291 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
7292 rcStrict = VERR_EM_INTERPRETER;
7293 }
7294
7295 if (rcStrict == VINF_SUCCESS)
7296 {
7297 switch (iCrReg)
7298 {
7299 case 0:
7300 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
7301 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
7302 break;
7303
7304 case 2:
7305 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
7306 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
7307 break;
7308
7309 case 3:
7310 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
7311 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
7312 break;
7313
7314 case 4:
7315 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
7316 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
7317 break;
7318
7319 case 8:
7320 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7321 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
7322 break;
7323
7324 default:
7325 {
7326 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
7327 pSvmTransient->u64ExitCode, iCrReg));
7328 break;
7329 }
7330 }
7331 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7332 }
7333 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7334 {
7335 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7336 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7337 rcStrict = VINF_SUCCESS;
7338 }
7339 else
7340 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_SYNC_CR3);
7341 return rcStrict;
7342}
7343
7344
7345/**
7346 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
7347 *
7348 * @returns Strict VBox status code.
7349 * @param pVCpu The cross context virtual CPU structure.
7350 * @param pVmcb Pointer to the VM control block.
7351 */
7352static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
7353{
7354 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7355 Log4Func(("idMsr=%#RX32\n", pVCpu->cpum.GstCtx.ecx));
7356
7357 VBOXSTRICTRC rcStrict;
7358 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7359 if (fSupportsNextRipSave)
7360 {
7361 /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
7362 * can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
7363 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7364 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7365 rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
7366 }
7367 else
7368 {
7369 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7370 rcStrict = IEMExecOne(pVCpu);
7371 }
7372
7373 AssertMsg( rcStrict == VINF_SUCCESS
7374 || rcStrict == VINF_IEM_RAISED_XCPT
7375 || rcStrict == VINF_CPUM_R3_MSR_READ,
7376 ("hmR0SvmExitReadMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7377
7378 if (rcStrict == VINF_IEM_RAISED_XCPT)
7379 {
7380 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7381 rcStrict = VINF_SUCCESS;
7382 }
7383 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7384 return rcStrict;
7385}
7386
7387
7388/**
7389 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
7390 *
7391 * @returns Strict VBox status code.
7392 * @param pVCpu The cross context virtual CPU structure.
7393 * @param pVmcb Pointer to the VM control block.
7394 * @param pSvmTransient Pointer to the SVM-transient structure.
7395 */
7396static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
7397{
7398 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7399 uint32_t const idMsr = pCtx->ecx;
7400 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7401 Log4Func(("idMsr=%#RX32\n", idMsr));
7402
7403 /*
7404 * Handle TPR patching MSR writes.
7405     * We utilize the LSTAR MSR for patching.
7406 */
7407 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7408 if ( idMsr == MSR_K8_LSTAR
7409 && pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingActive)
7410 {
7411 unsigned cbInstr;
7412 if (fSupportsNextRipSave)
7413 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7414 else
7415 {
7416 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
7417 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
7418 if ( rc == VINF_SUCCESS
7419 && pDis->pCurInstr->uOpcode == OP_WRMSR)
7420 Assert(cbInstr > 0);
7421 else
7422 cbInstr = 0;
7423 }
7424
7425 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
7426 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
7427 {
7428 int rc = APICSetTpr(pVCpu, pCtx->eax & 0xff);
7429 AssertRCReturn(rc, rc);
7430 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7431 }
7432
7433 int rc = VINF_SUCCESS;
7434 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7435 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7436 return rc;
7437 }
7438
7439 /*
7440 * Handle regular MSR writes.
7441 */
7442 VBOXSTRICTRC rcStrict;
7443 if (fSupportsNextRipSave)
7444 {
7445 /** @todo Optimize this: We don't need to get much of the MSR state here
7446 * since we're only updating. CPUMAllMsrs.cpp can ask for what it needs and
7447 * clear the applicable extern flags. */
7448 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7449 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7450 rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
7451 }
7452 else
7453 {
7454 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7455 rcStrict = IEMExecOne(pVCpu);
7456 }
7457
7458 AssertMsg( rcStrict == VINF_SUCCESS
7459 || rcStrict == VINF_IEM_RAISED_XCPT
7460 || rcStrict == VINF_CPUM_R3_MSR_WRITE,
7461 ("hmR0SvmExitWriteMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7462
7463 if (rcStrict == VINF_SUCCESS)
7464 {
7465 /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
7466 if ( idMsr >= MSR_IA32_X2APIC_START
7467 && idMsr <= MSR_IA32_X2APIC_END)
7468 {
7469 /*
7470 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest().
7471 * When full APIC register virtualization is implemented we'll have to make sure
7472 * APIC state is saved from the VMCB before IEM changes it.
7473 */
7474 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7475 }
7476 else
7477 {
7478 switch (idMsr)
7479 {
7480 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
7481 case MSR_K6_EFER: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR); break;
7482 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
7483 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
7484 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7485 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7486 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7487 }
7488 }
7489 }
7490 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7491 {
7492 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7493 rcStrict = VINF_SUCCESS;
7494 }
7495 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7496 return rcStrict;
7497}
7498
7499
7500/**
7501 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
7502 * \#VMEXIT.
7503 */
7504HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7505{
7506 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7507
7508 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
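        /* EXITINFO1 distinguishes the access type: 0 (SVM_EXIT1_MSR_READ) for RDMSR, 1 (SVM_EXIT1_MSR_WRITE) for WRMSR. */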
7509 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
7510 return hmR0SvmExitReadMsr(pVCpu, pVmcb);
7511
7512 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
7513 return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
7514}
7515
7516
7517/**
7518 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
7519 */
7520HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7521{
7522 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7523 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7524
7525 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
7526
7527 /** @todo Stepping with nested-guest. */
7528 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7529 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
7530 {
7531 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
7532 if (pSvmTransient->fWasGuestDebugStateActive)
7533 {
7534 AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
7535 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7536 return VERR_SVM_UNEXPECTED_EXIT;
7537 }
7538
7539 /*
7540 * Lazy DR0-3 loading.
7541 */
7542 if (!pSvmTransient->fWasHyperDebugStateActive)
7543 {
7544 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
7545 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
7546
7547 /* Don't intercept DRx read and writes. */
7548 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
7549 pVmcb->ctrl.u16InterceptRdDRx = 0;
7550 pVmcb->ctrl.u16InterceptWrDRx = 0;
7551 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
7552
7553 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7554 VMMRZCallRing3Disable(pVCpu);
7555 HM_DISABLE_PREEMPT(pVCpu);
7556
7557 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
7558 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
7559 Assert(CPUMIsGuestDebugStateActive(pVCpu));
7560
7561 HM_RESTORE_PREEMPT();
7562 VMMRZCallRing3Enable(pVCpu);
7563
7564 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
7565 return VINF_SUCCESS;
7566 }
7567 }
7568
7569 /*
7570 * Interpret the read/writing of DRx.
7571 */
7572 /** @todo Decode assist. */
7573 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
7574 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
7575 if (RT_LIKELY(rc == VINF_SUCCESS))
7576 {
7577 /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
7578 /** @todo CPUM should set this flag! */
7579 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
7580 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7581 }
7582 else
7583 Assert(rc == VERR_EM_INTERPRETER);
7584 return rc;
7585}
7586
7587
7588/**
7589 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
7590 */
7591HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7592{
7593 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7594 /* For now it's the same since we interpret the instruction anyway. Will change when using of Decode Assist is implemented. */
7595 VBOXSTRICTRC rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
7596 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
7597 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
7598 return rc;
7599}
7600
7601
7602/**
7603 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
7604 */
7605HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7606{
7607 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7608 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7609
7610 /** @todo decode assists... */
7611 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7612 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7613 {
7614 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7615 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7616 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
7617 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7618 {
7619 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7620 hmR0SvmUpdateVmRunFunction(pVCpu);
7621 }
7622 }
7623 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7624 {
7625 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7626 rcStrict = VINF_SUCCESS;
7627 }
7628 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7629 return rcStrict;
7630}
7631
7632
7633/**
7634 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
7635 */
7636HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7637{
7638 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7639 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);
7640
7641 /* I/O operation lookup arrays. */
7642 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
7643 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
7644 the result (in AL/AX/EAX). */
7645 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7646 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7647 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7648
7649 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7650
7651 /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
7652 SVMIOIOEXITINFO IoExitInfo;
7653 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
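        /* Bits 4:6 of EXITINFO1 are one-hot operand-size flags (8/16/32-bit), so the shifted value (1, 2 or 4) indexes the lookup tables above. */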
7654 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
7655 uint32_t cbValue = s_aIOSize[uIOWidth];
7656 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
7657
7658 if (RT_UNLIKELY(!cbValue))
7659 {
7660 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
7661 return VERR_EM_INTERPRETER;
7662 }
7663
7664 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
7665 VBOXSTRICTRC rcStrict;
7666 PCEMEXITREC pExitRec = NULL;
7667 if ( !pVCpu->hm.s.fSingleInstruction
7668 && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
7669 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7670 !IoExitInfo.n.u1Str
7671 ? IoExitInfo.n.u1Type == SVM_IOIO_READ
7672 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
7673 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
7674 : IoExitInfo.n.u1Type == SVM_IOIO_READ
7675 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
7676 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
7677 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7678 if (!pExitRec)
7679 {
7680 bool fUpdateRipAlready = false;
7681 if (IoExitInfo.n.u1Str)
7682 {
7683 /* INS/OUTS - I/O String instruction. */
7684 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
7685 * in EXITINFO1? Investigate once this thing is up and running. */
7686 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
7687 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
7688 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
7689 static IEMMODE const s_aenmAddrMode[8] =
7690 {
7691 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
7692 };
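                /* Likewise, bits 7:9 of EXITINFO1 are one-hot address-size flags (16/32/64-bit), hence the shift by 7. */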
7693 IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
7694 if (enmAddrMode != (IEMMODE)-1)
7695 {
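                    /* EXITINFO2 holds the RIP of the instruction following the I/O instruction, so the difference is the instruction length. */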
7696 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7697 if (cbInstr <= 15 && cbInstr >= 1)
7698 {
7699 Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
7700 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7701 {
7702 /* Don't know exactly how to detect whether u3Seg is valid, currently
7703 only enabling it for Bulldozer and later with NRIP. OS/2 broke on
7704 2384 Opterons when only checking NRIP. */
7705 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7706 if ( fSupportsNextRipSave
7707 && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
7708 {
7709 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
7710 ("u32Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
7711 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7712 IoExitInfo.n.u3Seg, true /*fIoChecked*/);
7713 }
7714 else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
7715 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7716 X86_SREG_DS, true /*fIoChecked*/);
7717 else
7718 rcStrict = IEMExecOne(pVCpu);
7719 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7720 }
7721 else
7722 {
7723 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
7724 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7725 true /*fIoChecked*/);
7726 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7727 }
7728 }
7729 else
7730 {
7731 AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
7732 rcStrict = IEMExecOne(pVCpu);
7733 }
7734 }
7735 else
7736 {
7737 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
7738 rcStrict = IEMExecOne(pVCpu);
7739 }
7740 fUpdateRipAlready = true;
7741 if (rcStrict == VINF_IEM_RAISED_XCPT)
7742 {
7743 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7744 rcStrict = VINF_SUCCESS;
7745 }
7746 }
7747 else
7748 {
7749 /* IN/OUT - I/O instruction. */
7750 Assert(!IoExitInfo.n.u1Rep);
7751
7752 uint8_t const cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7753 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7754 {
7755 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
7756 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7757 && !pCtx->eflags.Bits.u1TF)
7758 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue, pCtx->eax & uAndVal);
7759 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
7760 }
7761 else
7762 {
7763 uint32_t u32Val = 0;
7764 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
7765 if (IOM_SUCCESS(rcStrict))
7766 {
7767 /* Save result of I/O IN instr. in AL/AX/EAX. */
7768 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
7769 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
7770 }
7771 else if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7772 && !pCtx->eflags.Bits.u1TF)
7773 rcStrict = EMRZSetPendingIoPortRead(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue);
7774
7775 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
7776 }
7777 }
7778
7779 if (IOM_SUCCESS(rcStrict))
7780 {
7781 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
7782 if (!fUpdateRipAlready)
7783 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
7784
7785 /*
7786 * If any I/O breakpoints are armed, we need to check if one triggered
7787 * and take appropriate action.
7788 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
7789 */
7790 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
7791 * execution engines about whether hyper BPs and such are pending. */
7792 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
7793 uint32_t const uDr7 = pCtx->dr[7];
7794 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
7795 && X86_DR7_ANY_RW_IO(uDr7)
7796 && (pCtx->cr4 & X86_CR4_DE))
7797 || DBGFBpIsHwIoArmed(pVM)))
7798 {
7799 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7800 VMMRZCallRing3Disable(pVCpu);
7801 HM_DISABLE_PREEMPT(pVCpu);
7802
7803 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
7804 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
7805
7806 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
7807 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
7808 {
7809 /* Raise #DB. */
7810 pVmcb->guest.u64DR6 = pCtx->dr[6];
7811 pVmcb->guest.u64DR7 = pCtx->dr[7];
7812 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
7813 hmR0SvmSetPendingXcptDB(pVCpu);
7814 }
7815 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
7816 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
7817 else if ( rcStrict2 != VINF_SUCCESS
7818 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
7819 rcStrict = rcStrict2;
7820 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
7821
7822 HM_RESTORE_PREEMPT();
7823 VMMRZCallRing3Enable(pVCpu);
7824 }
7825
7826 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7827 }
7828#ifdef VBOX_STRICT
7829 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7830 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
7831 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
7832 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7833 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
7834 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
7835 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
7836 else
7837 {
7838 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
7839 * statuses, that the VMM device and some others may return. See
7840 * IOM_SUCCESS() for guidance. */
7841 AssertMsg( RT_FAILURE(rcStrict)
7842 || rcStrict == VINF_SUCCESS
7843 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
7844 || rcStrict == VINF_EM_DBG_BREAKPOINT
7845 || rcStrict == VINF_EM_RAW_GUEST_TRAP
7846 || rcStrict == VINF_EM_DBG_STEPPED
7847 || rcStrict == VINF_EM_RAW_TO_R3
7848 || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7849 }
7850#endif
7851 }
7852 else
7853 {
7854 /*
7855 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7856 */
7857 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7858 STAM_COUNTER_INC(!IoExitInfo.n.u1Str
7859 ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
7860 : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
7861 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
7862 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
7863 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));
7864
7865 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7866 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7867
7868 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7869 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7870 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7871 }
7872 return rcStrict;
7873}
7874
7875
7876/**
7877 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
7878 */
7879HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7880{
7881 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7882 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7883 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
7884
7885 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7886 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7887 Assert(pVM->hmr0.s.fNestedPaging);
7888
7889 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
7890 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7891 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
7892 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1; /* Note! High bits in EXITINFO1 may contain additional info and are
7893 thus intentionally not copied into u32ErrCode. */
7894
7895 Log4Func(("#NPF at CS:RIP=%04x:%RX64 GCPhysFaultAddr=%RGp ErrCode=%#x cbInstrFetched=%u %.15Rhxs\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr,
7896 u32ErrCode, pVmcb->ctrl.cbInstrFetched, pVmcb->ctrl.abInstr));
7897
7898 /*
7899 * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
7900 */
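        /* XAPIC_OFF_TPR (0x80) is the offset of the TPR register within the APIC MMIO page. */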
7901 if ( pVM->hm.s.fTprPatchingAllowed
7902 && (GCPhysFaultAddr & GUEST_PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
7903 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
7904 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
7905 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
7906 && !CPUMIsGuestInLongModeEx(pCtx)
7907 && !CPUMGetGuestCPL(pVCpu)
7908 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7909 {
7910 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7911 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7912
7913 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
7914 {
7915 /* Only attempt to patch the instruction once. */
7916 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7917 if (!pPatch)
7918 return VINF_EM_HM_PATCH_TPR_INSTR;
7919 }
7920 }
7921
7922 /*
7923 * Determine the nested paging mode.
7924 */
7925/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
7926 PGMMODE const enmNestedPagingMode = PGMGetHostMode(pVM);
7927
7928 /*
7929 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
7930 */
7931 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
7932 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
7933 {
7934 /*
7935 * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise
7936 * injecting the original pending event would most likely cause the same MMIO #NPF.
7937 */
7938 if (pVCpu->hm.s.Event.fPending)
7939 {
7940 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
7941 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7942 }
7943
7944 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
7945 VBOXSTRICTRC rcStrict;
7946 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7947 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
7948 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7949 if (!pExitRec)
7950 {
7951
7952 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
7953 u32ErrCode);
7954
7955 /*
7956 * If we succeed, resume guest execution.
7957 *
7958 * If we fail in interpreting the instruction because we couldn't get the guest
7959 * physical address of the page containing the instruction via the guest's page
7960 * tables (we would invalidate the guest page in the host TLB), resume execution
7961 * which would cause a guest page fault to let the guest handle this weird case.
7962 *
7963 * See @bugref{6043}.
7964 */
7965 if ( rcStrict == VINF_SUCCESS
7966 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
7967 || rcStrict == VERR_PAGE_NOT_PRESENT)
7968 {
7969 /* Successfully handled MMIO operation. */
7970 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7971 rcStrict = VINF_SUCCESS;
7972 }
7973 }
7974 else
7975 {
7976 /*
7977 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7978 */
7979 Assert(pCtx == &pVCpu->cpum.GstCtx);
7980 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7981 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
7982 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));
7983
7984 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7985 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7986
7987 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7988 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7989 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7990 }
7991 return rcStrict;
7992 }
7993
7994 /*
7995 * Nested page-fault.
7996 */
7997 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
7998 int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
7999 TRPMResetTrap(pVCpu);
8000
8001 Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
8002
8003 /*
8004 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
8005 */
8006 if ( rc == VINF_SUCCESS
8007 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8008 || rc == VERR_PAGE_NOT_PRESENT)
8009 {
8010 /* We've successfully synced our shadow page tables. */
8011 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8012 rc = VINF_SUCCESS;
8013 }
8014
8015 /*
8016 * If delivering an event causes an #NPF (and not MMIO), we shall resolve the fault and
8017 * re-inject the original event.
8018 */
8019 if (pVCpu->hm.s.Event.fPending)
8020 {
8021 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
8022
8023 /*
8024 * If the #NPF handler requested emulation of the instruction, ignore it.
8025 * We need to re-inject the original event so as to not lose it.
8026 * Reproducible when booting ReactOS 0.4.12 with BTRFS (installed using BootCD,
8027 * LiveCD is broken for other reasons).
8028 */
8029 if (rc == VINF_EM_RAW_EMULATE_INSTR)
8030 rc = VINF_EM_RAW_INJECT_TRPM_EVENT;
8031 }
8032
8033 return rc;
8034}
8035
8036
8037/**
8038 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
8039 * \#VMEXIT.
8040 */
8041HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8042{
8043 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8044 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8045
8046 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
8047 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8048 hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);
8049
8050 /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
8051 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8052 return VINF_SUCCESS;
8053}
8054
8055
8056/**
8057 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
8058 * \#VMEXIT.
8059 */
8060HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8061{
8062 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8063 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8064
8065#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
8066 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8067#endif
8068
8069 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8070 if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
8071 {
8072 /*
8073 * AMD-V provides us with the exception which caused the TS; we collect
8074 * the information in the call to hmR0SvmCheckExitDueToEventDelivery().
8075 */
8076 Log4Func(("TS occurred during event delivery\n"));
8077 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8078 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8079 }
8080
8081 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8082 * emulation. */
8083 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8084 return VERR_EM_INTERPRETER;
8085}
8086
8087
8088/**
8089 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
8090 */
8091HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8092{
8093 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8094 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8095
8096 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8097 if (pVM->hm.s.fTprPatchingAllowed)
8098 {
8099 int rc = hmEmulateSvmMovTpr(pVM, pVCpu);
8100 if (rc != VERR_NOT_FOUND)
8101 {
8102 Log4Func(("hmEmulateSvmMovTpr returns %Rrc\n", rc));
8103 return rc;
8104 }
8105 }
8106
8107 if (EMAreHypercallInstructionsEnabled(pVCpu))
8108 {
8109 unsigned cbInstr;
8110 if (hmR0SvmSupportsNextRipSave(pVCpu))
8111 {
8112 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8113 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8114 }
8115 else
8116 {
8117 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
8118 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
8119 if ( rc == VINF_SUCCESS
8120 && pDis->pCurInstr->uOpcode == OP_VMMCALL)
8121 Assert(cbInstr > 0);
8122 else
8123 cbInstr = 0;
8124 }
8125
8126 VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8127 if (RT_SUCCESS(rcStrict))
8128 {
8129 /* Only update the RIP if we're continuing guest execution and not in the case
8130 of say VINF_GIM_R3_HYPERCALL. */
8131 if (rcStrict == VINF_SUCCESS)
8132 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8133
8134 return VBOXSTRICTRC_VAL(rcStrict);
8135 }
8136 else
8137 Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));
8138 }
8139
8140 hmR0SvmSetPendingXcptUD(pVCpu);
8141 return VINF_SUCCESS;
8142}
8143
8144
8145/**
8146 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
8147 */
8148HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8149{
8150 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8151
8152 unsigned cbInstr;
8153 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8154 if (fSupportsNextRipSave)
8155 {
8156 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8157 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8158 }
8159 else
8160 {
8161 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
8162 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
8163 if ( rc == VINF_SUCCESS
8164 && pDis->pCurInstr->uOpcode == OP_PAUSE)
8165 Assert(cbInstr > 0);
8166 else
8167 cbInstr = 0;
8168 }
8169
8170 /** @todo The guest has likely hit a contended spinlock. We might want to
8171     *        poke or schedule a different guest VCPU. */
8172 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8173 return VINF_EM_RAW_INTERRUPT;
8174}
8175
8176
8177/**
8178 * \#VMEXIT handler for FERR intercept (SVM_EXIT_FERR_FREEZE). Conditional
8179 * \#VMEXIT.
8180 */
8181HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8182{
8183 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8184 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
8185 Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));
8186
8187 Log4Func(("Raising IRQ 13 in response to #FERR\n"));
8188 return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8189}
8190
8191
8192/**
8193 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
8194 */
8195HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8196{
8197 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8198
8199 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now (almost) ready. */
8200 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8201 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);
8202
8203 /* Emulate the IRET. We have to execute the IRET before an NMI, but must potentially
8204 * deliver a pending NMI right after. If the IRET faults, an NMI can come before the
8205 * handler executes. Yes, x86 is ugly.
8206 */
8207 return VINF_EM_RAW_EMULATE_INSTR;
8208}
8209
8210
8211/**
8212 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_XCPT_14).
8213 * Conditional \#VMEXIT.
8214 */
8215HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8216{
8217 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8218 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8219 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8220
8221 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
8222 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8223 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8224 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8225 uint32_t uErrCode = pVmcb->ctrl.u64ExitInfo1;
8226 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
8227
8228#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
8229 if (pVM->hmr0.s.fNestedPaging)
8230 {
8231 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8232 if ( !pSvmTransient->fVectoringDoublePF
8233 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8234 {
8235 /* A genuine guest #PF, reflect it to the guest. */
8236 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8237 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
8238 uFaultAddress, uErrCode));
8239 }
8240 else
8241 {
8242 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8243 hmR0SvmSetPendingXcptDF(pVCpu);
8244 Log4Func(("Pending #DF due to vectoring #PF. NP\n"));
8245 }
8246 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8247 return VINF_SUCCESS;
8248 }
8249#endif
8250
8251 Assert(!pVM->hmr0.s.fNestedPaging);
8252
8253 /*
8254 * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
8255 */
8256 if ( pVM->hm.s.fTprPatchingAllowed
8257 && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
8258 && !(uErrCode & X86_TRAP_PF_P) /* Not present. */
8259 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8260 && !CPUMIsGuestInLongModeEx(pCtx)
8261 && !CPUMGetGuestCPL(pVCpu)
8262 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8263 {
8264 RTGCPHYS GCPhysApicBase;
8265 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
8266 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8267
8268 /* Check if the page at the fault-address is the APIC base. */
8269 PGMPTWALK Walk;
8270 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, &Walk);
8271 if ( rc2 == VINF_SUCCESS
8272 && Walk.GCPhys == GCPhysApicBase)
8273 {
8274 /* Only attempt to patch the instruction once. */
8275 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
8276 if (!pPatch)
8277 return VINF_EM_HM_PATCH_TPR_INSTR;
8278 }
8279 }
8280
8281 Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
8282 pCtx->rip, uErrCode, pCtx->cr3));
8283
8284 /*
8285 * If it's a vectoring #PF, emulate injecting the original event injection as
8286 * PGMTrap0eHandler() is incapable of differentiating between instruction emulation and
8287 * event injection that caused a #PF. See @bugref{6607}.
8288 */
8289 if (pSvmTransient->fVectoringPF)
8290 {
8291 Assert(pVCpu->hm.s.Event.fPending);
8292 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8293 }
8294
8295 TRPMAssertXcptPF(pVCpu, uFaultAddress, uErrCode);
8296 int rc = PGMTrap0eHandler(pVCpu, uErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
8297
8298 Log4Func(("#PF: rc=%Rrc\n", rc));
8299
8300 if (rc == VINF_SUCCESS)
8301 {
8302        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
8303 TRPMResetTrap(pVCpu);
8304 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8305 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8306 return rc;
8307 }
8308
8309 if (rc == VINF_EM_RAW_GUEST_TRAP)
8310 {
8311 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8312
8313 /*
8314 * If a nested-guest delivers a #PF and that causes a #PF which is -not- a shadow #PF,
8315         * we should simply forward the #PF to the guest and it is up to the nested-hypervisor to
8316 * determine whether it is a nested-shadow #PF or a #DF, see @bugref{7243#c121}.
8317 */
8318 if ( !pSvmTransient->fVectoringDoublePF
8319 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8320 {
8321 /* It's a guest (or nested-guest) page fault and needs to be reflected. */
8322 uErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
8323 TRPMResetTrap(pVCpu);
8324
8325#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8326 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
8327 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8328 && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
8329 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress);
8330#endif
8331
8332 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8333 }
8334 else
8335 {
8336 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8337 TRPMResetTrap(pVCpu);
8338 hmR0SvmSetPendingXcptDF(pVCpu);
8339 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
8340 }
8341
8342 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8343 return VINF_SUCCESS;
8344 }
8345
8346 TRPMResetTrap(pVCpu);
8347 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8348 return rc;
8349}
8350
8351
8352/**
8353 * \#VMEXIT handler for undefined opcode (SVM_EXIT_XCPT_6).
8354 * Conditional \#VMEXIT.
8355 */
8356HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8357{
8358 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8359 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8361
8362 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8363 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
8364 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8365
8366 /** @todo if we accumulate more optional stuff here, we ought to combine the
8367 * reading of opcode bytes to avoid doing more than once. */
8368
8369 VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8370 if (pVCpu->hm.s.fGIMTrapXcptUD)
8371 {
8372 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8373 uint8_t cbInstr = 0;
8374 rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
8375 if (rcStrict == VINF_SUCCESS)
8376 {
8377 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
8378 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8379 rcStrict = VINF_SUCCESS;
8380 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8381 }
8382 else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
8383 rcStrict = VINF_SUCCESS;
8384 else if (rcStrict == VINF_GIM_R3_HYPERCALL)
8385 rcStrict = VINF_GIM_R3_HYPERCALL;
8386 else
8387 {
8388 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
8389 rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8390 }
8391 }
8392
8393 if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
8394 {
8395 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
8396 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8397 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
8398 {
8399 /* Ideally, IEM should just handle all these special #UD situations, but
8400               we don't quite trust things to behave optimally when doing that. So,
8401 for now we'll restrict ourselves to a handful of possible sysenter and
8402 sysexit encodings that we filter right here. */
8403 uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
8404 uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
8405 uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
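                /* Minimum bytes to fetch: SYSENTER/SYSEXIT are two bytes (0F 34 / 0F 35); at CPL 0 we presumably also allow for a REX.W prefix on SYSEXIT, hence three. */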
8406 uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
8407 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
8408 if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
8409 {
8410 cbInstr = cbMin;
8411 int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
8412 AssertRCStmt(rc2, cbInstr = 0);
8413 }
8414 else
8415 memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
8416 if ( cbInstr == 0 /* read error */
8417 || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
8418 || ( uCpl == 0
8419 && ( ( cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
8420 || ( cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35 /* rex.w sysexit */
8421 && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
8422 {
8423 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
8424 | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
8425 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8426 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr, abInstr, cbInstr);
8427 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
8428 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
8429 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
8430 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8431 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
8432 if (rcStrict == VINF_IEM_RAISED_XCPT)
8433 rcStrict = VINF_SUCCESS;
8434 return rcStrict;
8435 }
8436 Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8437 }
8438 else
8439 Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8440 }
8441
8442 /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
8443 if (RT_FAILURE(rcStrict))
8444 {
8445 hmR0SvmSetPendingXcptUD(pVCpu);
8446 rcStrict = VINF_SUCCESS;
8447 }
8448
8449 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8450 return rcStrict;
8451}
8452
8453
8454/**
8455 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_XCPT_16).
8456 * Conditional \#VMEXIT.
8457 */
8458HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8459{
8460 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8461 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8462 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8463
8464 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8465 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8466
8467 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8468 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8469
8472 if (!(pCtx->cr0 & X86_CR0_NE))
8473 {
8474 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8475        PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
8476 unsigned cbInstr;
8477 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbInstr);
8478 if (RT_SUCCESS(rc))
8479 {
8480 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
8481 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8482 if (RT_SUCCESS(rc))
8483 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8484 }
8485 else
8486 Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
8487 return rc;
8488 }
8489
8490 hmR0SvmSetPendingXcptMF(pVCpu);
8491 return VINF_SUCCESS;
8492}
8493
8494
8495/**
8496 * \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1). Conditional
8497 * \#VMEXIT.
8498 */
8499HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8500{
8501 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8502 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8503 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8505
8506 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
8507 {
8508 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
8509 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8510 }
8511
8512 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8513
8514 /*
8515 * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data
8516 * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception
8517 * handler expects. See AMD spec. 15.12.2 "#DB (Debug)".
8518 */
8519 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8520 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
8521 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8522 int rc = DBGFTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
8523 if (rc == VINF_EM_RAW_GUEST_TRAP)
8524 {
8525 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
8526 if (CPUMIsHyperDebugStateActive(pVCpu))
8527 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
8528
8529 /* Reflect the exception back to the guest. */
8530 hmR0SvmSetPendingXcptDB(pVCpu);
8531 rc = VINF_SUCCESS;
8532 }
8533
8534 /*
8535 * Update DR6.
8536 */
8537 if (CPUMIsHyperDebugStateActive(pVCpu))
8538 {
8539 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
8540 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
8541 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
8542 }
8543 else
8544 {
8545 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
8546 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
8547 }
8548
8549 return rc;
8550}
8551
8552
8553/**
8554 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_XCPT_17).
8555 * Conditional \#VMEXIT.
8556 */
8557HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8558{
8559 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8560 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8561 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
8562
8563 SVMEVENT Event;
8564 Event.u = 0;
8565 Event.n.u1Valid = 1;
8566 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8567 Event.n.u8Vector = X86_XCPT_AC;
8568 Event.n.u1ErrorCodeValid = 1;
8569 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8570 return VINF_SUCCESS;
8571}
8572
8573
8574/**
8575 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
8576 * Conditional \#VMEXIT.
8577 */
8578HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8579{
8580 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8581 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8582 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8583 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8584
8585 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8586 VBOXSTRICTRC rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
8587 if (rc == VINF_EM_RAW_GUEST_TRAP)
8588 {
8589 SVMEVENT Event;
8590 Event.u = 0;
8591 Event.n.u1Valid = 1;
8592 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8593 Event.n.u8Vector = X86_XCPT_BP;
8594 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8595 rc = VINF_SUCCESS;
8596 }
8597
8598 Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
8599 return rc;
8600}
8601
8602
8603/**
8604 * Hacks its way around the lovely mesa driver's backdoor accesses.
8605 *
8606 * @sa hmR0VmxHandleMesaDrvGp
8607 */
8608static int hmR0SvmHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8609{
8610 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK);
8611 Log(("hmR0SvmHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n",
8612 pVmcb->guest.CS.u16Sel, pVmcb->guest.u64RIP, pCtx->rcx, pCtx->rbx));
8613 RT_NOREF(pCtx, pVmcb);
8614
8615 /* For now we'll just skip the instruction. */
8616 hmR0SvmAdvanceRip(pVCpu, 1);
8617 return VINF_SUCCESS;
8618}
8619
8620
8621/**
8622 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
8623 * backdoor logging w/o checking what it is running inside.
8624 *
8625 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
8626 * backdoor port and magic numbers loaded in registers.
8627 *
8628 * @returns true if it is, false if it isn't.
8629 * @sa hmR0VmxIsMesaDrvGp
8630 */
8631DECLINLINE(bool) hmR0SvmIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8632{
8633 /* Check magic and port. */
8634 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
8635 /*Log8(("hmR0SvmIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax, pCtx->rdx));*/
8636 if (pCtx->dx != UINT32_C(0x5658))
8637 return false;
8638 if ((pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax) != UINT32_C(0x564d5868))
8639 return false;
8640
8641 /* Check that it is #GP(0). */
8642 if (pVmcb->ctrl.u64ExitInfo1 != 0)
8643 return false;
8644
8645 /* Flat ring-3 CS. */
8646 /*Log8(("hmR0SvmIsMesaDrvGp: u8CPL=%d base=%RX64\n", pVmcb->guest.u8CPL, pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base));*/
8647 if (pVmcb->guest.u8CPL != 3)
8648 return false;
8649 if ((pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base) != 0)
8650 return false;
8651
8652 /* 0xed: IN eAX,dx */
8653 if (pVmcb->ctrl.cbInstrFetched < 1) /* unlikely, it turns out. */
8654 {
8655 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_GPRS_MASK
8656 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8657 uint8_t abInstr[1];
8658 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
8659 /*Log8(("hmR0SvmIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0])); */
8660 if (RT_FAILURE(rc))
8661 return false;
8662 if (abInstr[0] != 0xed)
8663 return false;
8664 }
8665 else
8666 {
8667 /*Log8(("hmR0SvmIsMesaDrvGp: %#x\n", pVmcb->ctrl.abInstr));*/
8668 if (pVmcb->ctrl.abInstr[0] != 0xed)
8669 return false;
8670 }
8671 return true;
8672}
8673
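/*
 * Illustrative sketch: the guest-side access the check above matches is,
 * roughly, the VMware backdoor I/O read issued from flat ring-3 (port and
 * magic values taken from the checks above; the exact driver sequence is an
 * assumption):
 *
 *      mov     eax, 564d5868h      ; backdoor magic ('VMXh')
 *      mov     dx,  5658h          ; backdoor I/O port
 *      in      eax, dx             ; opcode 0xed; raises #GP(0) when ring-3 I/O is denied
 */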
8674
8675/**
8676 * \#VMEXIT handler for general protection faults (SVM_EXIT_XCPT_13).
8677 * Conditional \#VMEXIT.
8678 */
8679HMSVM_EXIT_DECL hmR0SvmExitXcptGP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8680{
8681 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8682 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8683 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8684
8685 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8686 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8687
8688 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8689 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
8690 || !hmR0SvmIsMesaDrvGp(pVCpu, pCtx, pVmcb))
8691 {
8692 SVMEVENT Event;
8693 Event.u = 0;
8694 Event.n.u1Valid = 1;
8695 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8696 Event.n.u8Vector = X86_XCPT_GP;
8697 Event.n.u1ErrorCodeValid = 1;
8698 Event.n.u32ErrorCode = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
8699 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8700 return VINF_SUCCESS;
8701 }
8702 return hmR0SvmHandleMesaDrvGp(pVCpu, pCtx, pVmcb);
8703}
8704
8705
8706/**
8707 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
8708 */
8709HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8710{
8711 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8712 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8713
8714 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8715 uint8_t const uVector = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0;
8716 uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
8717 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8718 Assert(uVector <= X86_XCPT_LAST);
8719 Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode));
8720
8721 SVMEVENT Event;
8722 Event.u = 0;
8723 Event.n.u1Valid = 1;
8724 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8725 Event.n.u8Vector = uVector;
8726 switch (uVector)
8727 {
8728 /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
8729 case X86_XCPT_PF: AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
8730 case X86_XCPT_DF:
8731 case X86_XCPT_TS:
8732 case X86_XCPT_NP:
8733 case X86_XCPT_SS:
8734 case X86_XCPT_GP:
8735 case X86_XCPT_AC:
8736 {
8737 Event.n.u1ErrorCodeValid = 1;
8738 Event.n.u32ErrorCode = uErrCode;
8739 break;
8740 }
8741 }
8742
8743#ifdef VBOX_WITH_STATISTICS
8744 switch (uVector)
8745 {
8746 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
8747 case X86_XCPT_DB: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); break;
8748 case X86_XCPT_BP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); break;
8749 case X86_XCPT_OF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
8750 case X86_XCPT_BR: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR); break;
8751 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
8752 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); break;
8753 case X86_XCPT_DF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF); break;
8754 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); break;
8755 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
8756 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
8757 case X86_XCPT_GP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); break;
8758 case X86_XCPT_PF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); break;
8759 case X86_XCPT_MF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); break;
8760 case X86_XCPT_AC: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC); break;
8761 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
8762 default:
8763 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8764 break;
8765 }
8766#endif
8767
8768 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8769 return VINF_SUCCESS;
8770}
8771
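/*
 * Illustrative sketch: how the vector and error code above relate to the VMCB
 * fields, using a hypothetical guest #SS with a selector error code of 0x28:
 */
#if 0
    Assert(pVmcb->ctrl.u64ExitCode  == SVM_EXIT_XCPT_12);  /* uVector == 12 == X86_XCPT_SS.          */
    Assert(pVmcb->ctrl.u64ExitInfo1 == 0x28);              /* uErrCode == 0x28, EV set by the switch. */
#endif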
8772
8773/**
8774 * \#VMEXIT handler for software interrupt (INTn). Conditional \#VMEXIT (debug).
8775 */
8776HMSVM_EXIT_DECL hmR0SvmExitSwInt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8777{
8778 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8779 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8780
8781 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8782 SVMEVENT Event;
8783 Event.u = 0;
8784 Event.n.u1Valid = 1;
8785 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
8786 Event.n.u8Vector = pVmcb->ctrl.u64ExitInfo1 & 0xff;
8787 Log4Func(("uVector=%#x\n", Event.n.u8Vector));
8788 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8789 return VINF_SUCCESS;
8790}
8791
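/*
 * Illustrative sketch: for a hypothetical guest "int 0x2a", the exit above
 * would report the vector in the low byte of EXITINFO1 and queue it as a
 * software interrupt for delivery on the next VM-entry:
 */
#if 0
    Assert((pVmcb->ctrl.u64ExitInfo1 & 0xff) == 0x2a);
    Assert(Event.n.u3Type == SVM_EVENT_SOFTWARE_INT);
#endif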
8792
8793/**
8794 * Generic exit handler that interprets the current instruction.
8795 *
8796 * Useful for exits that only get triggered by dtrace and the debugger. The caller
8797 * does the exit logging, and this function does the rest.
8798 */
8799static VBOXSTRICTRC hmR0SvmExitInterpretInstruction(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient,
8800 uint64_t fExtraImport, uint64_t fHmChanged)
8801{
8802#if 1
8803 RT_NOREF(pSvmTransient);
8804 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fExtraImport);
8805 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
8806 if (rcStrict == VINF_SUCCESS)
8807 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RIP);
8808 else
8809 {
8810 Log4Func(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
8811 if (rcStrict == VINF_IEM_RAISED_XCPT)
8812 {
8813 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK | fHmChanged);
8814 rcStrict = VINF_SUCCESS;
8815 }
8816 else
8817 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged);
8818 }
8819 return rcStrict;
8820#else
8821 RT_NOREF(pVCpu, pSvmTransient, fExtraImport, fHmChanged);
8822 return VINF_EM_RAW_EMULATE_INSTR;
8823#endif
8824}
8825
8826
8827/**
8828 * \#VMEXIT handler for STR. Conditional \#VMEXIT (debug).
8829 */
8830HMSVM_EXIT_DECL hmR0SvmExitTrRead(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8831{
8832 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8833 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8834 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR, 0);
8835}
8836
8837
8838/**
8839 * \#VMEXIT handler for LTR. Conditional \#VMEXIT (OS/2 TLB workaround, debug).
8840 */
8841HMSVM_EXIT_DECL hmR0SvmExitTrWrite(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8842{
8843 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8844
8845 /* Workaround for lack of TLB flushing in OS/2 when returning to protected
8846 mode after a real mode call (like a BIOS call). See ticketref:20625
8847 comment 14. */
8848 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8849 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
8850 {
8851 Log4Func(("%04x:%08RX64 TLB flush\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8852 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
8853 }
8854 else
8855 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8856
8857 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR, HM_CHANGED_GUEST_TR);
8858}
8859
8860
8861#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8862/**
8863 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
8864 */
8865HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8866{
8867 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8868
8869 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8870 Assert(pVmcb);
8871 Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
8872
8873 VBOXSTRICTRC rcStrict;
8874 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8875 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
8876 if (fSupportsNextRipSave)
8877 {
8878 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8879 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8880 rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
8881 }
8882 else
8883 {
8884 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8885 rcStrict = IEMExecOne(pVCpu);
8886 }
8887
8888 if (rcStrict == VINF_SUCCESS)
8889 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8890 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8891 {
8892 rcStrict = VINF_SUCCESS;
8893 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8894 }
8895 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8896 return rcStrict;
8897}
8898
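/*
 * Illustrative sketch: with NextRIP saving the instruction length is taken
 * from the VMCB rather than decoded, e.g. for CLGI (encoded as 0F 01 DD,
 * 3 bytes) at a hypothetical guest RIP of 0x401000:
 */
#if 0
    /* pVCpu->cpum.GstCtx.rip == 0x401000, pVmcb->ctrl.u64NextRIP == 0x401003 */
    uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;   /* == 3 */
#endif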
8899
8900/**
8901 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
8902 */
8903HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8904{
8905 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8906
8907 /*
8908 * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
8909 * we only intercept STGI when there are pending events waiting for GIF to become 1.
8910 */
8911 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8912 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
8913 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI);
8914
8915 VBOXSTRICTRC rcStrict;
8916 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8917 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
8918 if (fSupportsNextRipSave)
8919 {
8920 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8921 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8922 rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
8923 }
8924 else
8925 {
8926 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8927 rcStrict = IEMExecOne(pVCpu);
8928 }
8929
8930 if (rcStrict == VINF_SUCCESS)
8931 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8932 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8933 {
8934 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8935 rcStrict = VINF_SUCCESS;
8936 }
8937 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8938 return rcStrict;
8939}
8940
8941
8942/**
8943 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
8944 */
8945HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8946{
8947 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8948
8949 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8950 Assert(pVmcb);
8951 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
8952
8953 VBOXSTRICTRC rcStrict;
8954 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8955 uint64_t const fImport = CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_KERNEL_GS_BASE
8956 | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_SYSCALL_MSRS
8957 | CPUMCTX_EXTRN_SYSENTER_MSRS;
8958 if (fSupportsNextRipSave)
8959 {
8960 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8961 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8962 rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
8963 }
8964 else
8965 {
8966 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8967 rcStrict = IEMExecOne(pVCpu);
8968 }
8969
8970 if (rcStrict == VINF_SUCCESS)
8971 {
8972 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS | HM_CHANGED_GUEST_GS
8973 | HM_CHANGED_GUEST_TR | HM_CHANGED_GUEST_LDTR
8974 | HM_CHANGED_GUEST_KERNEL_GS_BASE | HM_CHANGED_GUEST_SYSCALL_MSRS
8975 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
8976 }
8977 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8978 {
8979 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8980 rcStrict = VINF_SUCCESS;
8981 }
8982 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8983 return rcStrict;
8984}
8985
8986
8987/**
8988 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
8989 */
8990HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8991{
8992 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8993
8994 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8995 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
8996
8997 VBOXSTRICTRC rcStrict;
8998 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8999 if (fSupportsNextRipSave)
9000 {
9001 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9002 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9003 rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
9004 }
9005 else
9006 {
9007 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9008 rcStrict = IEMExecOne(pVCpu);
9009 }
9010
9011 if (rcStrict == VINF_IEM_RAISED_XCPT)
9012 {
9013 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9014 rcStrict = VINF_SUCCESS;
9015 }
9016 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9017 return rcStrict;
9018}
9019
9020
9021/**
9022 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
9023 */
9024HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9025{
9026 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9027
9028 VBOXSTRICTRC rcStrict;
9029 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9030 if (fSupportsNextRipSave)
9031 {
9032 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9033 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9034 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9035 rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
9036 }
9037 else
9038 {
9039 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9040 rcStrict = IEMExecOne(pVCpu);
9041 }
9042
9043 if (rcStrict == VINF_IEM_RAISED_XCPT)
9044 {
9045 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9046 rcStrict = VINF_SUCCESS;
9047 }
9048 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9049 return rcStrict;
9050}
9051
9052
9053/**
9054 * \#VMEXIT handler for VMRUN (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
9055 */
9056HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9057{
9058 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9059 /* We shall import the entire state here just in case we enter and continue execution of
9060 the nested-guest with hardware-assisted SVM in ring-0; we would be switching VMCBs and
9061 could otherwise lose part of the CPU state. */
9062 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
9063
9064 VBOXSTRICTRC rcStrict;
9065 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9066 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
9067 if (fSupportsNextRipSave)
9068 {
9069 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9070 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9071 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
9072 }
9073 else
9074 {
9075 /* We use IEMExecOneBypassEx() here as it suppresses the attempt to continue emulating any
9076 instruction(s) when interrupt inhibition is set as part of emulating the VMRUN
9077 instruction itself, see @bugref{7243#c126}. */
9078 rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), NULL /* pcbWritten */);
9079 }
9080 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
9081
9082 if (rcStrict == VINF_SUCCESS)
9083 {
9084 rcStrict = VINF_SVM_VMRUN;
9085 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK);
9086 }
9087 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9088 {
9089 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9090 rcStrict = VINF_SUCCESS;
9091 }
9092 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9093 return rcStrict;
9094}
9095
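/*
 * Illustrative sketch: a successfully emulated VMRUN is reported to the caller
 * as VINF_SVM_VMRUN (set above), so the run loop can switch over to executing
 * the nested-guest. A minimal, hypothetical caller-side check:
 */
#if 0
    if (rcStrict == VINF_SVM_VMRUN)
    {
        /* Switch to the nested-guest run loop. */
    }
#endif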
9096
9097/**
9098 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1).
9099 * Unconditional \#VMEXIT.
9100 */
9101HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9102{
9103 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9104 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9105
9106 if (pVCpu->hm.s.Event.fPending)
9107 {
9108 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
9109 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9110 }
9111
9112 hmR0SvmSetPendingXcptDB(pVCpu);
9113 return VINF_SUCCESS;
9114}
9115
9116
9117/**
9118 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
9119 * Conditional \#VMEXIT.
9120 */
9121HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9122{
9123 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9124 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9125
9126 SVMEVENT Event;
9127 Event.u = 0;
9128 Event.n.u1Valid = 1;
9129 Event.n.u3Type = SVM_EVENT_EXCEPTION;
9130 Event.n.u8Vector = X86_XCPT_BP;
9131 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
9132 return VINF_SUCCESS;
9133}
9134#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
9135
9136/** @} */
9137