VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@100039

Last change on this file since 100039 was 99753, checked in by vboxsync, 18 months ago

VMM: More Clang 13 build fixes (warnings as errors).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 380.3 KB
1/* $Id: HMSVMR0.cpp 99753 2023-05-11 15:02:39Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_HM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <iprt/asm-amd64-x86.h>
35#include <iprt/thread.h>
36
37#include <VBox/vmm/pdmapi.h>
38#include <VBox/vmm/dbgf.h>
39#include <VBox/vmm/iem.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/tm.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/gcm.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/vmm/apic.h>
46#include "HMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include <VBox/err.h>
49#include "HMSVMR0.h"
50#include "dtrace/VBoxVMM.h"
51
52#ifdef DEBUG_ramshankar
53# define HMSVM_SYNC_FULL_GUEST_STATE
54# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
55# define HMSVM_ALWAYS_TRAP_PF
56# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
57#endif
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63#ifdef VBOX_WITH_STATISTICS
64# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
65 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
66 if ((u64ExitCode) == SVM_EXIT_NPF) \
67 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
68 else \
69 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
70 } while (0)
71
72# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
73 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebugExitAll); \
74 if ((u64ExitCode) == SVM_EXIT_NPF) \
75 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
76 else \
77 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
78 } while (0)
79
80# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
81 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll); \
82 if ((u64ExitCode) == SVM_EXIT_NPF) \
83 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitReasonNpf); \
84 else \
85 STAM_COUNTER_INC(&pVCpu->hm.s.aStatNestedExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
86 } while (0)
87#else
88# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
89# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
90# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
91#endif /* !VBOX_WITH_STATISTICS */
92
93/** If we decide to use a function table approach this can be useful to
94 * switch to a "static DECLCALLBACK(int)". */
95#define HMSVM_EXIT_DECL static VBOXSTRICTRC
96
97/**
98 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
99 * guest using hardware-assisted SVM.
100 *
101 * This excludes state like TSC AUX and GPRs (other than RSP, RAX), which are
102 * always swapped and restored across the world-switch, and also registers like
103 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
104 * \#VMEXIT.
105 */
106#define HMSVM_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
107 | CPUMCTX_EXTRN_RFLAGS \
108 | CPUMCTX_EXTRN_RAX \
109 | CPUMCTX_EXTRN_RSP \
110 | CPUMCTX_EXTRN_SREG_MASK \
111 | CPUMCTX_EXTRN_CR0 \
112 | CPUMCTX_EXTRN_CR2 \
113 | CPUMCTX_EXTRN_CR3 \
114 | CPUMCTX_EXTRN_TABLE_MASK \
115 | CPUMCTX_EXTRN_DR6 \
116 | CPUMCTX_EXTRN_DR7 \
117 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
118 | CPUMCTX_EXTRN_SYSCALL_MSRS \
119 | CPUMCTX_EXTRN_SYSENTER_MSRS \
120 | CPUMCTX_EXTRN_HWVIRT \
121 | CPUMCTX_EXTRN_INHIBIT_INT \
122 | CPUMCTX_EXTRN_HM_SVM_MASK)
123
124/**
125 * Subset of the guest-CPU state that is shared between the guest and host.
126 */
127#define HMSVM_CPUMCTX_SHARED_STATE CPUMCTX_EXTRN_DR_MASK
128
129/** Macro for importing guest state from the VMCB back into CPUMCTX. */
130#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \
131 do { \
132 if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \
133 hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \
134 } while (0)
135
136/** Assert that the required state bits are fetched. */
137#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
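/*
 * Illustrative usage sketch (not part of the original source): a \#VMEXIT
 * handler that needs, say, CR3 and RIP to be current in CPUMCTX would do:
 *
 *     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_RIP);
 *     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_RIP);
 *
 * i.e. import only the state that fExtrn still marks as living in the VMCB,
 * then assert that those bits are no longer external before touching
 * pVCpu->cpum.GstCtx.
 */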
140
141/** Assert that preemption is disabled or covered by thread-context hooks. */
142#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
143 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
144
145/** Assert that we haven't migrated CPUs when thread-context hooks are not
146 * used. */
147#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
148 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
149 ("Illegal migration! Entered on CPU %u Current %u\n", \
150 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()));
151
152/** Assert that we're not executing a nested-guest. */
153#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
154# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
155#else
156# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
157#endif
158
159/** Assert that we're executing a nested-guest. */
160#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
161# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
162#else
163# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
164#endif
165
166/** Macro for checking and, if necessary, returning from the calling function for
167 * \#VMEXIT intercepts that may be raised during delivery of another
168 * event in the guest. */
169#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
170# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
171 do \
172 { \
173 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
174 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
175 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
176 else if ( rc == VINF_EM_RESET \
177 && CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
178 { \
179 HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), HMSVM_CPUMCTX_EXTRN_ALL); \
180 return IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0); \
181 } \
182 else \
183 return rc; \
184 } while (0)
185#else
186# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
187 do \
188 { \
189 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
190 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
191 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
192 else \
193 return rc; \
194 } while (0)
195#endif
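/*
 * Illustrative note (not part of the original source): the macro above is meant
 * to sit at the top of a \#VMEXIT handler, e.g.:
 *
 *     HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
 *     {
 *         HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
 *         ...
 *     }
 *
 * so that a \#VMEXIT raised while another event was being delivered is either
 * handled as a double fault, turned into a nested-guest shutdown \#VMEXIT, or
 * propagated to the caller before the handler body runs.
 */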
196
197/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
198 * instruction that exited. */
199#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
200 do { \
201 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
202 (a_rc) = VINF_EM_DBG_STEPPED; \
203 } while (0)
204
205/** Validate segment descriptor granularity bit. */
206#ifdef VBOX_STRICT
207# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
208 AssertMsg( !(a_pCtx)->reg.Attr.n.u1Present \
209 || ( (a_pCtx)->reg.Attr.n.u1Granularity \
210 ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
211 : (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
212 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
213 (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
214#else
215# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) do { } while (0)
216#endif
217
218/**
219 * Exception bitmap mask for all contributory exceptions.
220 *
221 * Page fault is deliberately excluded here as it's conditional as to whether
222 * it's contributory or benign. Page faults are handled separately.
223 */
224#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
225 | RT_BIT(X86_XCPT_DE))
226
227/**
228 * Mandatory/unconditional guest control intercepts.
229 *
230 * SMIs can and do happen in normal operation. We need not intercept them
231 * while executing the guest (or nested-guest).
232 */
233#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS ( SVM_CTRL_INTERCEPT_INTR \
234 | SVM_CTRL_INTERCEPT_NMI \
235 | SVM_CTRL_INTERCEPT_INIT \
236 | SVM_CTRL_INTERCEPT_RDPMC \
237 | SVM_CTRL_INTERCEPT_CPUID \
238 | SVM_CTRL_INTERCEPT_RSM \
239 | SVM_CTRL_INTERCEPT_HLT \
240 | SVM_CTRL_INTERCEPT_IOIO_PROT \
241 | SVM_CTRL_INTERCEPT_MSR_PROT \
242 | SVM_CTRL_INTERCEPT_INVLPGA \
243 | SVM_CTRL_INTERCEPT_SHUTDOWN \
244 | SVM_CTRL_INTERCEPT_FERR_FREEZE \
245 | SVM_CTRL_INTERCEPT_VMRUN \
246 | SVM_CTRL_INTERCEPT_SKINIT \
247 | SVM_CTRL_INTERCEPT_WBINVD \
248 | SVM_CTRL_INTERCEPT_MONITOR \
249 | SVM_CTRL_INTERCEPT_MWAIT \
250 | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
251 | SVM_CTRL_INTERCEPT_XSETBV)
252
253/** @name VMCB Clean Bits.
254 *
255 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
256 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
257 * memory.
258 *
259 * @{ */
260/** All intercept vectors, TSC offset, PAUSE filter counter. */
261#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
262/** I/O permission bitmap, MSR permission bitmap. */
263#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
264/** ASID. */
265#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
266/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
267 * V_INTR_VECTOR. */
268#define HMSVM_VMCB_CLEAN_INT_CTRL RT_BIT(3)
269/** Nested Paging: Nested CR3 (nCR3), PAT. */
270#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
271/** Control registers (CR0, CR3, CR4, EFER). */
272#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
273/** Debug registers (DR6, DR7). */
274#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
275/** GDT, IDT limit and base. */
276#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
277/** Segment register: CS, SS, DS, ES limit and base. */
278#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
279/** CR2.*/
280#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
281/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
282#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
283/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
284 * PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
285#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
286/** Mask of all valid VMCB Clean bits. */
287#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
288 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
289 | HMSVM_VMCB_CLEAN_ASID \
290 | HMSVM_VMCB_CLEAN_INT_CTRL \
291 | HMSVM_VMCB_CLEAN_NP \
292 | HMSVM_VMCB_CLEAN_CRX_EFER \
293 | HMSVM_VMCB_CLEAN_DRX \
294 | HMSVM_VMCB_CLEAN_DT \
295 | HMSVM_VMCB_CLEAN_SEG \
296 | HMSVM_VMCB_CLEAN_CR2 \
297 | HMSVM_VMCB_CLEAN_LBR \
298 | HMSVM_VMCB_CLEAN_AVIC)
299/** @} */
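/*
 * Illustrative sketch (not part of the original source): when ring-0 code
 * modifies a VMCB field that is covered by one of the clean bits above, it must
 * also clear that bit so the CPU reloads the field on the next VMRUN, e.g.:
 *
 *     pVmcb->ctrl.u64TSCOffset      = uNewTscOffset;                 // hypothetical new offset
 *     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;  // TSC offset is covered by bit 0
 *
 * Leaving the bit set while the field changed would let AMD-V keep using the
 * stale, cached value.
 */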
300
301/**
302 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
303 */
304typedef enum SVMMSREXITREAD
305{
306 /** Reading this MSR causes a \#VMEXIT. */
307 SVMMSREXIT_INTERCEPT_READ = 0xb,
308 /** Reading this MSR does not cause a \#VMEXIT. */
309 SVMMSREXIT_PASSTHRU_READ
310} SVMMSREXITREAD;
311
312/**
313 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
314 */
315typedef enum SVMMSREXITWRITE
316{
317 /** Writing to this MSR causes a \#VMEXIT. */
318 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
319 /** Writing to this MSR does not cause a \#VMEXIT. */
320 SVMMSREXIT_PASSTHRU_WRITE
321} SVMMSREXITWRITE;
322
323/**
324 * SVM \#VMEXIT handler.
325 *
326 * @returns Strict VBox status code.
327 * @param pVCpu The cross context virtual CPU structure.
328 * @param pSvmTransient Pointer to the SVM-transient structure.
329 */
330typedef VBOXSTRICTRC FNSVMEXITHANDLER(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
331
332
333/*********************************************************************************************************************************
334* Internal Functions *
335*********************************************************************************************************************************/
336static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu);
337static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState);
338
339
340/** @name \#VMEXIT handlers.
341 * @{
342 */
343static FNSVMEXITHANDLER hmR0SvmExitIntr;
344static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
345static FNSVMEXITHANDLER hmR0SvmExitInvd;
346static FNSVMEXITHANDLER hmR0SvmExitCpuid;
347static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
348static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
349static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
350static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
351static FNSVMEXITHANDLER hmR0SvmExitHlt;
352static FNSVMEXITHANDLER hmR0SvmExitMonitor;
353static FNSVMEXITHANDLER hmR0SvmExitMwait;
354static FNSVMEXITHANDLER hmR0SvmExitShutdown;
355static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
356static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
357static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
358static FNSVMEXITHANDLER hmR0SvmExitMsr;
359static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
360static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
361static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
362static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
363static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
364static FNSVMEXITHANDLER hmR0SvmExitVIntr;
365static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
366static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
367static FNSVMEXITHANDLER hmR0SvmExitPause;
368static FNSVMEXITHANDLER hmR0SvmExitFerrFreeze;
369static FNSVMEXITHANDLER hmR0SvmExitIret;
370static FNSVMEXITHANDLER hmR0SvmExitXcptDE;
371static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
372static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
373static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
374static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
375static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
376static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
377static FNSVMEXITHANDLER hmR0SvmExitXcptGP;
378static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
379static FNSVMEXITHANDLER hmR0SvmExitSwInt;
380static FNSVMEXITHANDLER hmR0SvmExitTrRead;
381static FNSVMEXITHANDLER hmR0SvmExitTrWrite;
382#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
383static FNSVMEXITHANDLER hmR0SvmExitClgi;
384static FNSVMEXITHANDLER hmR0SvmExitStgi;
385static FNSVMEXITHANDLER hmR0SvmExitVmload;
386static FNSVMEXITHANDLER hmR0SvmExitVmsave;
387static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
388static FNSVMEXITHANDLER hmR0SvmExitVmrun;
389static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
390static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
391#endif
392/** @} */
393
394static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
395#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
396static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
397#endif
398static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops);
399
400
401/*********************************************************************************************************************************
402* Global Variables *
403*********************************************************************************************************************************/
404/** Ring-0 memory object for the IO bitmap. */
405static RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
406/** Physical address of the IO bitmap. */
407static RTHCPHYS g_HCPhysIOBitmap;
408/** Pointer to the IO bitmap. */
409static R0PTRTYPE(void *) g_pvIOBitmap;
410
411#ifdef VBOX_STRICT
412# define HMSVM_LOG_RBP_RSP RT_BIT_32(0)
413# define HMSVM_LOG_CR_REGS RT_BIT_32(1)
414# define HMSVM_LOG_CS RT_BIT_32(2)
415# define HMSVM_LOG_SS RT_BIT_32(3)
416# define HMSVM_LOG_FS RT_BIT_32(4)
417# define HMSVM_LOG_GS RT_BIT_32(5)
418# define HMSVM_LOG_LBR RT_BIT_32(6)
419# define HMSVM_LOG_ALL ( HMSVM_LOG_RBP_RSP \
420 | HMSVM_LOG_CR_REGS \
421 | HMSVM_LOG_CS \
422 | HMSVM_LOG_SS \
423 | HMSVM_LOG_FS \
424 | HMSVM_LOG_GS \
425 | HMSVM_LOG_LBR)
426
427/**
428 * Dumps the virtual CPU state and additional info to the logger for diagnostics.
429 *
430 * @param pVCpu The cross context virtual CPU structure.
431 * @param pVmcb Pointer to the VM control block.
432 * @param pszPrefix Log prefix.
433 * @param fFlags Log flags, see HMSVM_LOG_XXX.
434 * @param uVerbose The verbosity level, currently unused.
435 */
436static void hmR0SvmLogState(PVMCPUCC pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose)
437{
438 RT_NOREF2(pVCpu, uVerbose);
439 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
440
441 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
442 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));
443
444 if (fFlags & HMSVM_LOG_RBP_RSP)
445 {
446 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
447 Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
448 }
449
450 if (fFlags & HMSVM_LOG_CR_REGS)
451 {
452 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
453 Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
454 }
455
456 if (fFlags & HMSVM_LOG_CS)
457 {
458 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
459 Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
460 pCtx->cs.u32Limit, pCtx->cs.Attr.u));
461 }
462 if (fFlags & HMSVM_LOG_SS)
463 {
464 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
465 Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
466 pCtx->ss.u32Limit, pCtx->ss.Attr.u));
467 }
468 if (fFlags & HMSVM_LOG_FS)
469 {
470 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
471 Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
472 pCtx->fs.u32Limit, pCtx->fs.Attr.u));
473 }
474 if (fFlags & HMSVM_LOG_GS)
475 {
476 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
477 Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
478 pCtx->gs.u32Limit, pCtx->gs.Attr.u));
479 }
480
481 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
482 if (fFlags & HMSVM_LOG_LBR)
483 {
484 Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
485 pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
486 }
487 NOREF(pszPrefix); NOREF(pVmcbGuest); NOREF(pCtx);
488}
489#endif /* VBOX_STRICT */
490
491
492/**
493 * Sets up and activates AMD-V on the current CPU.
494 *
495 * @returns VBox status code.
496 * @param pHostCpu The HM physical-CPU structure.
497 * @param pVM The cross context VM structure. Can be
498 * NULL after a resume!
499 * @param pvCpuPage Pointer to the global CPU page.
500 * @param HCPhysCpuPage Physical address of the global CPU page.
501 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
502 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs (currently
503 * unused).
504 */
505VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
506 PCSUPHWVIRTMSRS pHwvirtMsrs)
507{
508 Assert(!fEnabledByHost);
509 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
510 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
511 Assert(pvCpuPage); NOREF(pvCpuPage);
512 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
513
514 RT_NOREF2(fEnabledByHost, pHwvirtMsrs);
515
516 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
517 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
518
519 /*
520 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
521 */
522 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
523 if (u64HostEfer & MSR_K6_EFER_SVME)
524 {
525 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
526 if ( pVM
527 && pVM->hm.s.svm.fIgnoreInUseError)
528 pHostCpu->fIgnoreAMDVInUseError = true;
529
530 if (!pHostCpu->fIgnoreAMDVInUseError)
531 {
532 ASMSetFlags(fEFlags);
533 return VERR_SVM_IN_USE;
534 }
535 }
536
537 /* Turn on AMD-V in the EFER MSR. */
538 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
539
540 /* Write the physical page address where the CPU will store the host state while executing the VM. */
541 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
542
543 /* Restore interrupts. */
544 ASMSetFlags(fEFlags);
545
546 /*
547 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all
548 * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all
549 * ASIDs (flushing is done upon VMRUN). Therefore, flag that we need to flush the TLB
550 * entirely before executing any guest code.
551 */
552 pHostCpu->fFlushAsidBeforeUse = true;
553
554 /*
555 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
556 */
557 ++pHostCpu->cTlbFlushes;
558
559 return VINF_SUCCESS;
560}
561
562
563/**
564 * Deactivates AMD-V on the current CPU.
565 *
566 * @returns VBox status code.
567 * @param pHostCpu The HM physical-CPU structure.
568 * @param pvCpuPage Pointer to the global CPU page.
569 * @param HCPhysCpuPage Physical address of the global CPU page.
570 */
571VMMR0DECL(int) SVMR0DisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
572{
573 RT_NOREF1(pHostCpu);
574 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
575 AssertReturn( HCPhysCpuPage
576 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
577 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
578
579 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
580 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
581
582 /* Turn off AMD-V in the EFER MSR. */
583 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
584 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
585
586 /* Invalidate host state physical address. */
587 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
588
589 /* Restore interrupts. */
590 ASMSetFlags(fEFlags);
591
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Does global AMD-V initialization (called during module initialization).
598 *
599 * @returns VBox status code.
600 */
601VMMR0DECL(int) SVMR0GlobalInit(void)
602{
603 /*
604 * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
605 * intercept all IO accesses, it's done once globally here instead of per-VM.
606 */
607 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
608 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
609 if (RT_FAILURE(rc))
610 return rc;
611
612 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
613 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
614
615 /* Set all bits to intercept all IO accesses. */
616 ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
617
618 return VINF_SUCCESS;
619}
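/*
 * Illustrative sketch (not part of the original source): the IOPM is a plain
 * bitmap with one intercept bit per I/O port, so a hypothetical helper that
 * allowed direct access to a single port would look roughly like:
 *
 *     static void svmAllowIoPort(uint8_t *pbIoBitmap, uint16_t uPort)
 *     {
 *         pbIoBitmap[uPort / 8] &= ~(uint8_t)RT_BIT(uPort % 8);
 *     }
 *
 * VirtualBox never clears any of these bits - the fill above makes every port
 * access cause a \#VMEXIT - and 3 pages are needed because 65536 ports take
 * 8 KB of bits plus a few extra bits for multi-byte accesses at port 0xffff.
 */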
620
621
622/**
623 * Does global AMD-V termination (called during module termination).
624 */
625VMMR0DECL(void) SVMR0GlobalTerm(void)
626{
627 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
628 {
629 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
630 g_pvIOBitmap = NULL;
631 g_HCPhysIOBitmap = 0;
632 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
633 }
634}
635
636
637/**
638 * Frees any allocated per-VCPU structures for a VM.
639 *
640 * @param pVM The cross context VM structure.
641 */
642DECLINLINE(void) hmR0SvmFreeStructs(PVMCC pVM)
643{
644 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
645 {
646 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
647 AssertPtr(pVCpu);
648
649 if (pVCpu->hmr0.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
650 {
651 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcbHost, false);
652 pVCpu->hmr0.s.svm.HCPhysVmcbHost = 0;
653 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
654 }
655
656 if (pVCpu->hmr0.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
657 {
658 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcb, false);
659 pVCpu->hmr0.s.svm.pVmcb = NULL;
660 pVCpu->hmr0.s.svm.HCPhysVmcb = 0;
661 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
662 }
663
664 if (pVCpu->hmr0.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
665 {
666 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, false);
667 pVCpu->hmr0.s.svm.pvMsrBitmap = NULL;
668 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = 0;
669 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
670 }
671 }
672}
673
674
675/**
676 * Sets pfnVMRun to the best suited variant.
677 *
678 * This must be called whenever anything changes relative to the SVMR0VMRun
679 * variant selection:
680 * - pVCpu->hm.s.fLoadSaveGuestXcr0
681 * - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
682 * - CPUMCTX_WSF_IBPB_EXIT in pVCpu->cpum.GstCtx.fWorldSwitcher
683 * - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
684 * - Perhaps: CPUMCTX.fXStateMask (windows only)
685 *
686 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
687 * CPUMCTX_WSF_IBPB_EXIT can be changed at runtime.
688 */
689static void hmR0SvmUpdateVmRunFunction(PVMCPUCC pVCpu)
690{
691 static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
692 {
693 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
694 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
695 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
696 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
697 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
698 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
699 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
700 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
701 };
702 uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
703 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
704 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 4 : 0);
705 PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
706 if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
707 pVCpu->hmr0.s.svm.pfnVMRun = pfnVMRun;
708}
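/*
 * Illustrative note (not part of the original source): the index is a 3-bit
 * encoding of the three flags, matching the table order above. For example,
 * fLoadSaveGuestXcr0 set, HM_WSF_IBPB_ENTRY clear and HM_WSF_IBPB_EXIT set
 * gives idx = 1 | 0 | 4 = 5, i.e. hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit.
 */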
709
710
711/**
712 * Selector FNHMSVMVMRUN implementation.
713 */
714static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
715{
716 hmR0SvmUpdateVmRunFunction(pVCpu);
717 return pVCpu->hmr0.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
718}
719
720
721/**
722 * Does per-VM AMD-V initialization.
723 *
724 * @returns VBox status code.
725 * @param pVM The cross context VM structure.
726 */
727VMMR0DECL(int) SVMR0InitVM(PVMCC pVM)
728{
729 int rc = VERR_INTERNAL_ERROR_5;
730
731 /*
732 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
733 */
734 uint32_t u32Family;
735 uint32_t u32Model;
736 uint32_t u32Stepping;
737 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
738 {
739 Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
740 pVM->hmr0.s.svm.fAlwaysFlushTLB = true;
741 }
742
743 /*
744 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
745 */
746 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
747 {
748 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
749 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
750 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
751 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
752 }
753
754 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
755 {
756 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
757
758 /*
759 * Initialize the hardware-assisted SVM guest-execution handler.
760 * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
761 */
762 pVCpu->hmr0.s.svm.pfnVMRun = hmR0SvmVMRunSelector;
763
764 /*
765 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
766 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
767 */
768/** @todo Does this need to be below 4G? */
769 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << HOST_PAGE_SHIFT, false /* fExecutable */);
770 if (RT_FAILURE(rc))
771 goto failure_cleanup;
772
773 void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcbHost);
774 pVCpu->hmr0.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcbHost, 0 /* iPage */);
775 Assert(pVCpu->hmr0.s.svm.HCPhysVmcbHost < _4G);
776 RT_BZERO(pvVmcbHost, HOST_PAGE_SIZE);
777
778 /*
779 * Allocate one page for the guest-state VMCB.
780 */
781/** @todo Does this need to be below 4G? */
782 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << HOST_PAGE_SHIFT, false /* fExecutable */);
783 if (RT_FAILURE(rc))
784 goto failure_cleanup;
785
786 pVCpu->hmr0.s.svm.pVmcb = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcb);
787 pVCpu->hmr0.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcb, 0 /* iPage */);
788 Assert(pVCpu->hmr0.s.svm.HCPhysVmcb < _4G);
789 RT_BZERO(pVCpu->hmr0.s.svm.pVmcb, HOST_PAGE_SIZE);
790
791 /*
792 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
793 * SVM to not require one.
794 */
795/** @todo Does this need to be below 4G? */
796 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT,
797 false /* fExecutable */);
798 if (RT_FAILURE(rc))
799 goto failure_cleanup;
800
801 pVCpu->hmr0.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjMsrBitmap);
802 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
803 /* Set all bits to intercept all MSR accesses (changed later on). */
804 ASMMemFill32(pVCpu->hmr0.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT, UINT32_C(0xffffffff));
805 }
806
807 return VINF_SUCCESS;
808
809failure_cleanup:
810 hmR0SvmFreeStructs(pVM);
811 return rc;
812}
813
814
815/**
816 * Does per-VM AMD-V termination.
817 *
818 * @returns VBox status code.
819 * @param pVM The cross context VM structure.
820 */
821VMMR0DECL(int) SVMR0TermVM(PVMCC pVM)
822{
823 hmR0SvmFreeStructs(pVM);
824 return VINF_SUCCESS;
825}
826
827
828/**
829 * Returns whether the VMCB Clean Bits feature is supported.
830 *
831 * @returns @c true if supported, @c false otherwise.
832 * @param pVCpu The cross context virtual CPU structure.
833 * @param fIsNestedGuest Whether we are currently executing the nested-guest.
834 */
835DECL_FORCE_INLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPUCC pVCpu, bool fIsNestedGuest)
836{
837 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
838 bool const fHostVmcbCleanBits = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
839 if (!fIsNestedGuest)
840 return fHostVmcbCleanBits;
841 return fHostVmcbCleanBits && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
842}
843
844
845/**
846 * Returns whether the decode assists feature is supported.
847 *
848 * @returns @c true if supported, @c false otherwise.
849 * @param pVCpu The cross context virtual CPU structure.
850 */
851DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPUCC pVCpu)
852{
853 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
854#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
855 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
856 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
857 && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
858#endif
859 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
860}
861
862
863/**
864 * Returns whether the NRIP_SAVE feature is supported.
865 *
866 * @returns @c true if supported, @c false otherwise.
867 * @param pVCpu The cross context virtual CPU structure.
868 */
869DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPUCC pVCpu)
870{
871 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
872#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
873 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
874 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
875 && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
876#endif
877 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
878}
879
880
881/**
882 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
883 *
884 * @param pVCpu The cross context virtual CPU structure.
885 * @param pbMsrBitmap Pointer to the MSR bitmap.
886 * @param idMsr The MSR for which the permissions are being set.
887 * @param enmRead MSR read permissions.
888 * @param enmWrite MSR write permissions.
889 *
890 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
891 * caller needs to take care of this.
892 */
893static void hmR0SvmSetMsrPermission(PVMCPUCC pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
894 SVMMSREXITWRITE enmWrite)
895{
896 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
897 uint16_t offMsrpm;
898 uint8_t uMsrpmBit;
899 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
900 AssertRC(rc);
901
902 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
903 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
904
905 pbMsrBitmap += offMsrpm;
906 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
907 *pbMsrBitmap |= RT_BIT(uMsrpmBit);
908 else
909 {
910 if (!fInNestedGuestMode)
911 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
912#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
913 else
914 {
915 /* Only clear the bit if the nested-guest is also not intercepting the MSR read.*/
916 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit)))
917 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
918 else
919 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit));
920 }
921#endif
922 }
923
924 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
925 *pbMsrBitmap |= RT_BIT(uMsrpmBit + 1);
926 else
927 {
928 if (!fInNestedGuestMode)
929 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
930#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
931 else
932 {
933 /* Only clear the bit if the nested-guest is also not intercepting the MSR write.*/
934 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit + 1)))
935 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
936 else
937 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
938 }
939#endif
940 }
941}
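/*
 * Illustrative worked example (not part of the original source): the MSRPM uses
 * two adjacent bits per MSR - an even read-intercept bit followed by the
 * write-intercept bit - which is what the uMsrpmBit assertion above reflects.
 * Assuming the usual SVM MSRPM layout, MSR_K8_LSTAR (0xc0000082) falls in the
 * 0xc0000000 range that starts at byte offset 0x800, so roughly:
 *
 *     bit index = 0x82 * 2                = 0x104
 *     offMsrpm  = 0x800 + 0x104/8         = 0x800 + 0x20 = 0x820
 *     uMsrpmBit = 0x104 % 8               = 4 (read), 5 (write)
 *
 * Setting SVMMSREXIT_PASSTHRU_READ/WRITE therefore clears bits 4 and 5 of byte
 * 0x820 in the bitmap.
 */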
942
943
944/**
945 * Sets up AMD-V for the specified VM.
946 * This function is only called once per-VM during initialization.
947 *
948 * @returns VBox status code.
949 * @param pVM The cross context VM structure.
950 */
951VMMR0DECL(int) SVMR0SetupVM(PVMCC pVM)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 AssertReturn(pVM, VERR_INVALID_PARAMETER);
955
956 /*
957 * Validate and copy over some parameters.
958 */
959 AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
960 bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
961 AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
962 pVM->hmr0.s.fNestedPaging = fNestedPaging;
963 pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
964
965 /*
966 * Determine some configuration parameters.
967 */
968 bool const fPauseFilter = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
969 bool const fPauseFilterThreshold = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
970 bool const fUsePauseFilter = fPauseFilter && pVM->hm.s.svm.cPauseFilter;
971
972 bool const fLbrVirt = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
973 bool const fUseLbrVirt = fLbrVirt && pVM->hm.s.svm.fLbrVirt; /** @todo IEM implementation etc. */
974
975#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
976 bool const fVirtVmsaveVmload = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
977 bool const fUseVirtVmsaveVmload = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && fNestedPaging;
978
979 bool const fVGif = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
980 bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
981#endif
982
983 PVMCPUCC pVCpu0 = VMCC_GET_CPU_0(pVM);
984 PSVMVMCB pVmcb0 = pVCpu0->hmr0.s.svm.pVmcb;
985 AssertMsgReturn(RT_VALID_PTR(pVmcb0), ("Invalid pVmcb (%p) for vcpu[0]\n", pVmcb0), VERR_SVM_INVALID_PVMCB);
986 PSVMVMCBCTRL pVmcbCtrl0 = &pVmcb0->ctrl;
987
988 /* Always trap #AC for reasons of security. */
989 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
990
991 /* Always trap #DB for reasons of security. */
992 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
993
994 /* Trap exceptions unconditionally (debug purposes). */
995#ifdef HMSVM_ALWAYS_TRAP_PF
996 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_PF);
997#endif
998#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
999 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
1000 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_BP)
1001 | RT_BIT_32(X86_XCPT_DE)
1002 | RT_BIT_32(X86_XCPT_NM)
1003 | RT_BIT_32(X86_XCPT_UD)
1004 | RT_BIT_32(X86_XCPT_NP)
1005 | RT_BIT_32(X86_XCPT_SS)
1006 | RT_BIT_32(X86_XCPT_GP)
1007 | RT_BIT_32(X86_XCPT_PF)
1008 | RT_BIT_32(X86_XCPT_MF)
1009 ;
1010#endif
1011
1012 /* Apply the exception intercepts needed by the GIM provider. */
1013 if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1014 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
1015
1016 /* Apply the exception intercepts needed by the GCM fixers. */
1017 if (pVCpu0->hm.s.fGCMTrapXcptDE)
1018 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_DE);
1019
1020 /* The mesa 3d driver hack needs #GP. */
1021 if (pVCpu0->hm.s.fTrapXcptGpForLovelyMesaDrv)
1022 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_GP);
1023
1024 /* Set up unconditional intercepts and conditions. */
1025 pVmcbCtrl0->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
1026 | SVM_CTRL_INTERCEPT_VMMCALL
1027 | SVM_CTRL_INTERCEPT_VMSAVE
1028 | SVM_CTRL_INTERCEPT_VMLOAD
1029 | SVM_CTRL_INTERCEPT_CLGI
1030 | SVM_CTRL_INTERCEPT_STGI;
1031
1032#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
1033 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
1034#endif
1035
1036#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1037 if (pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm)
1038 {
1039 /* Virtualized VMSAVE/VMLOAD. */
1040 if (fUseVirtVmsaveVmload)
1041 {
1042 pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload = 1;
1043 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_VMSAVE
1044 | SVM_CTRL_INTERCEPT_VMLOAD);
1045 }
1046 else
1047 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1048
1049 /* Virtual GIF. */
1050 if (fUseVGif)
1051 {
1052 pVmcbCtrl0->IntCtrl.n.u1VGifEnable = 1;
1053 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_CLGI
1054 | SVM_CTRL_INTERCEPT_STGI);
1055 }
1056 else
1057 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1058 }
1059 else
1060#endif
1061 {
1062 Assert(!pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm);
1063 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1064 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1065 }
1066
1067 /* CR4 writes must always be intercepted for tracking PGM mode changes and
1068 AVX (for XCR0 syncing during world switches). */
1069 pVmcbCtrl0->u16InterceptWrCRx = RT_BIT(4);
1070
1071 /* Intercept all DRx reads and writes by default. Changed later on. */
1072 pVmcbCtrl0->u16InterceptRdDRx = 0xffff;
1073 pVmcbCtrl0->u16InterceptWrDRx = 0xffff;
1074
1075 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
1076 pVmcbCtrl0->IntCtrl.n.u1VIntrMasking = 1;
1077
1078 /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts,
1079 as we currently deliver both PIC and APIC interrupts alike; see hmR0SvmEvaluatePendingEvent(). */
1080 pVmcbCtrl0->IntCtrl.n.u1IgnoreTPR = 1;
1081
1082 /* Set the IO permission bitmap physical addresses. */
1083 pVmcbCtrl0->u64IOPMPhysAddr = g_HCPhysIOBitmap;
1084
1085 /* LBR virtualization. */
1086 pVmcbCtrl0->LbrVirt.n.u1LbrVirt = fUseLbrVirt;
1087
1088 /* The host ASID is MBZ; for the guest, start with 1. */
1089 pVmcbCtrl0->TLBCtrl.n.u32ASID = 1;
1090
1091 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
1092 pVmcbCtrl0->NestedPagingCtrl.n.u1NestedPaging = fNestedPaging;
1093
1094 /* Without Nested Paging, we need additional intercepts. */
1095 if (!fNestedPaging)
1096 {
1097 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
1098 pVmcbCtrl0->u16InterceptRdCRx |= RT_BIT(3);
1099 pVmcbCtrl0->u16InterceptWrCRx |= RT_BIT(3);
1100
1101 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
1102 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
1103 | SVM_CTRL_INTERCEPT_TASK_SWITCH;
1104
1105 /* Page faults must be intercepted to implement shadow paging. */
1106 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
1107 }
1108
1109 /* Workaround for missing OS/2 TLB flush, see ticketref:20625. */
1110 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
1111 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TR_WRITES;
1112
1113 /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
1114 if (fUsePauseFilter)
1115 {
1116 Assert(pVM->hm.s.svm.cPauseFilter > 0);
1117 pVmcbCtrl0->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
1118 if (fPauseFilterThreshold)
1119 pVmcbCtrl0->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
1120 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
1121 }
1122
1123 /*
1124 * Setup the MSR permission bitmap.
1125 * The following MSRs are saved/restored automatically during the world-switch.
1126 * Don't intercept guest read/write accesses to these MSRs.
1127 */
1128 uint8_t *pbMsrBitmap0 = (uint8_t *)pVCpu0->hmr0.s.svm.pvMsrBitmap;
1129 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1130 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1131 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1132 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1133 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1134 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1135 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1136 if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1137 {
1138 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1139 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1140 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1141 }
1142 else
1143 {
1144 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1145 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1146 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1147 }
1148 pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hmr0.s.svm.HCPhysMsrBitmap;
1149
1150 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1151 Assert(pVmcbCtrl0->u32VmcbCleanBits == 0);
1152
1153 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
1154 {
1155 PVMCPUCC pVCpuCur = VMCC_GET_CPU(pVM, idCpu);
1156 PSVMVMCB pVmcbCur = pVCpuCur->hmr0.s.svm.pVmcb;
1157 AssertMsgReturn(RT_VALID_PTR(pVmcbCur), ("Invalid pVmcb (%p) for vcpu[%u]\n", pVmcbCur, idCpu), VERR_SVM_INVALID_PVMCB);
1158 PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;
1159
1160 /* Copy the VMCB control area. */
1161 memcpy(pVmcbCtrlCur, pVmcbCtrl0, sizeof(*pVmcbCtrlCur));
1162
1163 /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
1164 uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hmr0.s.svm.pvMsrBitmap;
1165 memcpy(pbMsrBitmapCur, pbMsrBitmap0, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1166 pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hmr0.s.svm.HCPhysMsrBitmap;
1167
1168 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1169 Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
1170
1171 /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
1172 Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu0->hm.s.fGIMTrapXcptUD);
1173 /* Same for GCM, #DE trapping should be uniform across VCPUs. */
1174 Assert(pVCpuCur->hm.s.fGCMTrapXcptDE == pVCpu0->hm.s.fGCMTrapXcptDE);
1175 }
1176
1177#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1178 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool fUseVGif=%RTbool fUseVirtVmsaveVmload=%RTbool\n", fUsePauseFilter,
1179 fUseLbrVirt, fUseVGif, fUseVirtVmsaveVmload));
1180#else
1181 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool\n", fUsePauseFilter, fUseLbrVirt));
1182#endif
1183 return VINF_SUCCESS;
1184}
1185
1186
1187/**
1188 * Gets a pointer to the currently active guest (or nested-guest) VMCB.
1189 *
1190 * @returns Pointer to the current context VMCB.
1191 * @param pVCpu The cross context virtual CPU structure.
1192 */
1193DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPUCC pVCpu)
1194{
1195#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1196 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1197 return &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
1198#endif
1199 return pVCpu->hmr0.s.svm.pVmcb;
1200}
1201
1202
1203/**
1204 * Gets a pointer to the nested-guest VMCB cache.
1205 *
1206 * @returns Pointer to the nested-guest VMCB cache.
1207 * @param pVCpu The cross context virtual CPU structure.
1208 */
1209DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPUCC pVCpu)
1210{
1211#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1212 Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
1213 return &pVCpu->hm.s.svm.NstGstVmcbCache;
1214#else
1215 RT_NOREF(pVCpu);
1216 return NULL;
1217#endif
1218}
1219
1220
1221/**
1222 * Invalidates a guest page by guest virtual address.
1223 *
1224 * @returns VBox status code.
1225 * @param pVCpu The cross context virtual CPU structure.
1226 * @param GCVirt Guest virtual address of the page to invalidate.
1227 */
1228VMMR0DECL(int) SVMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
1229{
1230 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
1231
1232 bool const fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH) || pVCpu->CTX_SUFF(pVM)->hmr0.s.svm.fAlwaysFlushTLB;
1233
1234 /* Skip it if a TLB flush is already pending. */
1235 if (!fFlushPending)
1236 {
1237 Log4Func(("%#RGv\n", GCVirt));
1238
1239 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
1240 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
1241
1242 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
1243 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1244 }
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Flushes the appropriate tagged-TLB entries.
1251 *
1252 * @param pHostCpu The HM physical-CPU structure.
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param pVmcb Pointer to the VM control block.
1255 */
1256static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1257{
1258 /*
1259 * Force a TLB flush for the first world switch if the current CPU differs from the one
1260 * we ran on last. This can happen both for start & resume due to long jumps back to
1261 * ring-3.
1262 *
1263 * We also force a TLB flush whenever executing a nested-guest VCPU as there is no
1264 * correlation between it and the physical CPU.
1265 *
1266 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while
1267 * flushing the TLB, so we cannot reuse the ASIDs without flushing.
1268 */
1269 bool fNewAsid = false;
1270 Assert(pHostCpu->idCpu != NIL_RTCPUID);
1271 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
1272 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes
1273#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1274 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
1275#endif
1276 )
1277 {
1278 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1279 pVCpu->hmr0.s.fForceTLBFlush = true;
1280 fNewAsid = true;
1281 }
1282
1283 /* Set TLB flush state as checked until we return from the world switch. */
1284 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
1285
1286 /* Check for explicit TLB flushes. */
1287 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1288 {
1289 pVCpu->hmr0.s.fForceTLBFlush = true;
1290 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1291 }
1292
1293 /*
1294 * If the CPU is affected by AMD erratum 170, we need to flush the entire TLB for each world switch. Sad.
1295 * This host-CPU requirement takes precedence.
1296 */
1297 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1298 if (pVM->hmr0.s.svm.fAlwaysFlushTLB)
1299 {
1300 pHostCpu->uCurrentAsid = 1;
1301 pVCpu->hmr0.s.uCurrentAsid = 1;
1302 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1303 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1304 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1305
1306 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1307 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1308 }
1309 else
1310 {
1311 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
1312 if (pVCpu->hmr0.s.fForceTLBFlush)
1313 {
1314 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1315 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1316
1317 if (fNewAsid)
1318 {
1319 ++pHostCpu->uCurrentAsid;
1320
1321 bool fHitASIDLimit = false;
1322 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
1323 {
1324 pHostCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
1325 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
1326 fHitASIDLimit = true;
1327 }
1328
1329 if ( fHitASIDLimit
1330 || pHostCpu->fFlushAsidBeforeUse)
1331 {
1332 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1333 pHostCpu->fFlushAsidBeforeUse = false;
1334 }
1335
1336 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
1337 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1338 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1339 }
1340 else
1341 {
1342 if (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
1343 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
1344 else
1345 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1346 }
1347
1348 pVCpu->hmr0.s.fForceTLBFlush = false;
1349 }
1350 }
1351
1352 /* Update VMCB with the ASID. */
1353 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hmr0.s.uCurrentAsid)
1354 {
1355 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hmr0.s.uCurrentAsid;
1356 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
1357 }
1358
1359 AssertMsg(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu,
1360 ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hmr0.s.idLastCpu, pHostCpu->idCpu));
1361 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
1362 ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
1363 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
1364 ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
1365 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
1366 ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
1367
1368#ifdef VBOX_WITH_STATISTICS
1369 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
1370 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1371 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
1372 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
1373 {
1374 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1375 }
1376 else
1377 {
1378 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1379 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1380 }
1381#endif
1382}
1383
1384
1385/**
1386 * Sets an exception intercept in the specified VMCB.
1387 *
1388 * @param pVmcb Pointer to the VM control block.
1389 * @param uXcpt The exception (X86_XCPT_*).
1390 */
1391DECLINLINE(void) hmR0SvmSetXcptIntercept(PSVMVMCB pVmcb, uint8_t uXcpt)
1392{
1393 if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt)))
1394 {
1395 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
1396 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1397 }
1398}
1399
1400
1401/**
1402 * Clears an exception intercept in the specified VMCB.
1403 *
1404 * @param pVCpu The cross context virtual CPU structure.
1405 * @param pVmcb Pointer to the VM control block.
1406 * @param uXcpt The exception (X86_XCPT_*).
1407 *
1408 * @remarks This takes into account if we're executing a nested-guest and only
1409 * removes the exception intercept if both the guest -and- nested-guest
1410 * are not intercepting it.
1411 */
1412DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt)
1413{
1414 Assert(uXcpt != X86_XCPT_DB);
1415 Assert(uXcpt != X86_XCPT_AC);
1416 Assert(uXcpt != X86_XCPT_GP);
1417#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1418 if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt))
1419 {
1420 bool fRemove = true;
1421# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1422 /* Only remove the intercept if the nested-guest is also not intercepting it! */
1423 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1424 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1425 {
1426 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1427 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
1428 }
1429# else
1430 RT_NOREF(pVCpu);
1431# endif
1432 if (fRemove)
1433 {
1434 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(uXcpt);
1435 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1436 }
1437 }
1438#else
1439 RT_NOREF3(pVCpu, pVmcb, uXcpt);
1440#endif
1441}
1442
1443
1444/**
1445 * Sets a control intercept in the specified VMCB.
1446 *
1447 * @param pVmcb Pointer to the VM control block.
1448 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1449 */
1450DECLINLINE(void) hmR0SvmSetCtrlIntercept(PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1451{
1452 if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
1453 {
1454 pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
1455 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1456 }
1457}
1458
1459
1460/**
1461 * Clears a control intercept in the specified VMCB.
1462 *
1463 * @returns @c true if the intercept is still set, @c false otherwise.
1464 * @param pVCpu The cross context virtual CPU structure.
1465 * @param pVmcb Pointer to the VM control block.
1466 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1467 *
1468 * @remarks This takes into account if we're executing a nested-guest and only
1469 * removes the control intercept if both the guest -and- nested-guest
1470 * are not intercepting it.
1471 */
1472static bool hmR0SvmClearCtrlIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1473{
1474 if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
1475 {
1476 bool fRemove = true;
1477#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1478 /* Only remove the control intercept if the nested-guest is also not intercepting it! */
1479 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1480 {
1481 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1482 fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
1483 }
1484#else
1485 RT_NOREF(pVCpu);
1486#endif
1487 if (fRemove)
1488 {
1489 pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
1490 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1491 }
1492 }
1493
1494 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
1495}
1496
1497
1498/**
1499 * Exports the guest (or nested-guest) CR0 into the VMCB.
1500 *
1501 * @param pVCpu The cross context virtual CPU structure.
1502 * @param pVmcb Pointer to the VM control block.
1503 *
1504 * @remarks This assumes we always pre-load the guest FPU.
1505 * @remarks No-long-jump zone!!!
1506 */
1507static void hmR0SvmExportGuestCR0(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1508{
1509 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1510
1511 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1512 uint64_t const uGuestCr0 = pCtx->cr0;
1513 uint64_t uShadowCr0 = uGuestCr0;
1514
1515 /* Always enable caching. */
1516 uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1517
1518    /* When Nested Paging is not available, use shadow page tables and intercept #PFs (the latter is done in SVMR0SetupVM()). */
1519 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1520 {
1521 uShadowCr0 |= X86_CR0_PG /* Use shadow page tables. */
1522 | X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1523 }
1524
1525 /*
1526 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that
1527     * let us isolate the host from it, IEM/REM still need work to emulate it properly,
1528 * see @bugref{7243#c103}.
1529 */
1530 if (!(uGuestCr0 & X86_CR0_NE))
1531 {
1532 uShadowCr0 |= X86_CR0_NE;
1533 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_MF);
1534 }
1535 else
1536 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF);
1537
1538 /*
1539 * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
1540 *
1541     * CR0 writes still need interception as PGM requires tracking paging mode changes,
1542 * see @bugref{6944}.
1543 *
1544 * We also don't ever want to honor weird things like cache disable from the guest.
1545 * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0
1546 * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
1547 */
1548 if (uShadowCr0 == uGuestCr0)
1549 {
1550 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1551 {
1552 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
1553 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
1554 Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
1555 }
1556 else
1557 {
1558 /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
1559 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1560 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
1561 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));
1562 pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx & ~RT_BIT(0))
1563 | (pVmcbNstGstCache->u16InterceptWrCRx & RT_BIT(0));
1564 }
1565 }
1566 else
1567 {
1568 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
1569 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
1570 }
1571 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1572
1573 Assert(!RT_HI_U32(uShadowCr0));
1574 if (pVmcb->guest.u64CR0 != uShadowCr0)
1575 {
1576 pVmcb->guest.u64CR0 = uShadowCr0;
1577 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1578 }
1579}
1580
1581
1582/**
1583 * Exports the guest (or nested-guest) CR3 into the VMCB.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure.
1586 * @param pVmcb Pointer to the VM control block.
1587 *
1588 * @remarks No-long-jump zone!!!
1589 */
1590static void hmR0SvmExportGuestCR3(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1591{
1592 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1593
1594 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1595 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1596 if (pVM->hmr0.s.fNestedPaging)
1597 {
1598 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetHyperCR3(pVCpu);
1599 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1600 pVmcb->guest.u64CR3 = pCtx->cr3;
1601 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1602 }
1603 else
1604 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1605
1606 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1607}
1608
1609
1610/**
1611 * Exports the guest (or nested-guest) CR4 into the VMCB.
1612 *
1613 * @param pVCpu The cross context virtual CPU structure.
1614 * @param pVmcb Pointer to the VM control block.
1615 *
1616 * @remarks No-long-jump zone!!!
1617 */
1618static int hmR0SvmExportGuestCR4(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1619{
1620 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1621
1622 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1623 uint64_t uShadowCr4 = pCtx->cr4;
1624 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1625 {
1626 switch (pVCpu->hm.s.enmShadowMode)
1627 {
1628 case PGMMODE_REAL:
1629 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1630 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1631
1632 case PGMMODE_32_BIT: /* 32-bit paging. */
1633 uShadowCr4 &= ~X86_CR4_PAE;
1634 break;
1635
1636 case PGMMODE_PAE: /* PAE paging. */
1637 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1638 /** Must use PAE paging as we could use physical memory > 4 GB */
1639 uShadowCr4 |= X86_CR4_PAE;
1640 break;
1641
1642 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1643 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1644#ifdef VBOX_WITH_64_BITS_GUESTS
1645 break;
1646#else
1647 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1648#endif
1649
1650 default: /* shut up gcc */
1651 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1652 }
1653 }
1654
1655 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
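    /* For instance (illustrative values): a guest that sets CR4.OSXSAVE and programs XCR0 to 0x7 (x87+SSE+AVX)
       while the host runs with a different XCR0 forces us to swap XCR0 around each world switch. */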
1656 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1657 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
1658 {
1659 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
1660 hmR0SvmUpdateVmRunFunction(pVCpu);
1661 }
1662
1663 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
1664 if (uShadowCr4 == pCtx->cr4)
1665 {
1666 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1667 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
1668 else
1669 {
1670 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
1671 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1672 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4))
1673 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4));
1674 }
1675 }
1676 else
1677 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
1678
1679 /* CR4 writes are always intercepted (both guest, nested-guest) for tracking
1680       PGM mode changes and AVX (for XCR0 syncing during world switching). */
1681 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
1682
1683    /* Update the VMCB with the shadow CR4 and clear the appropriate VMCB clean bits. */
1684 Assert(!RT_HI_U32(uShadowCr4));
1685 pVmcb->guest.u64CR4 = uShadowCr4;
1686 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
1687
1688 return VINF_SUCCESS;
1689}
1690
1691
1692/**
1693 * Exports the guest (or nested-guest) control registers into the VMCB.
1694 *
1695 * @returns VBox status code.
1696 * @param pVCpu The cross context virtual CPU structure.
1697 * @param pVmcb Pointer to the VM control block.
1698 *
1699 * @remarks No-long-jump zone!!!
1700 */
1701static int hmR0SvmExportGuestControlRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1702{
1703 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1704
1705 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK)
1706 {
1707 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0)
1708 hmR0SvmExportGuestCR0(pVCpu, pVmcb);
1709
1710 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2)
1711 {
1712 pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2;
1713 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1714 }
1715
1716 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3)
1717 hmR0SvmExportGuestCR3(pVCpu, pVmcb);
1718
1719        /* CR4 re-loading is ASSUMED to be done every time we get in from ring-3! (XCR0) */
1720 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4)
1721 {
1722 int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb);
1723 if (RT_FAILURE(rc))
1724 return rc;
1725 }
1726
1727 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK;
1728 }
1729 return VINF_SUCCESS;
1730}
1731
1732
1733/**
1734 * Exports the guest (or nested-guest) segment registers into the VMCB.
1735 *
1736 * @param pVCpu The cross context virtual CPU structure.
1737 * @param pVmcb Pointer to the VM control block.
1738 *
1739 * @remarks No-long-jump zone!!!
1740 */
1741static void hmR0SvmExportGuestSegmentRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1742{
1743 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1744 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1745
1746 /* Guest segment registers. */
1747 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK)
1748 {
1749 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS)
1750 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);
1751
1752 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS)
1753 {
1754 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
1755 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1756 }
1757
1758 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS)
1759 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);
1760
1761 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES)
1762 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);
1763
1764 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS)
1765 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
1766
1767 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS)
1768 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
1769
1770 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1771 }
1772
1773 /* Guest TR. */
1774 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR)
1775 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
1776
1777 /* Guest LDTR. */
1778 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR)
1779 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
1780
1781 /* Guest GDTR. */
1782 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR)
1783 {
1784 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1785 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1786 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1787 }
1788
1789 /* Guest IDTR. */
1790 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR)
1791 {
1792 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1793 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1794 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1795 }
1796
1797 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SREG_MASK
1798 | HM_CHANGED_GUEST_TABLE_MASK);
1799}
1800
1801
1802/**
1803 * Exports the guest (or nested-guest) MSRs into the VMCB.
1804 *
1805 * @param pVCpu The cross context virtual CPU structure.
1806 * @param pVmcb Pointer to the VM control block.
1807 *
1808 * @remarks No-long-jump zone!!!
1809 */
1810static void hmR0SvmExportGuestMsrs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1811{
1812 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1813 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1814
1815 /* Guest Sysenter MSRs. */
1816 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
1817 {
1818 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
1819 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1820
1821 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
1822 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1823
1824 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
1825 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1826 }
1827
1828 /*
1829 * Guest EFER MSR.
1830 * AMD-V requires guest EFER.SVME to be set. Weird.
1831 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1832 */
1833 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR)
1834 {
1835 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1836 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1837 }
1838
1839    /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit, otherwise SVM expects amd64 shadow paging. */
1840 if ( !CPUMIsGuestInLongModeEx(pCtx)
1841 && (pCtx->msrEFER & MSR_K6_EFER_LME))
1842 {
1843 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1844 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1845 }
1846
1847 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS)
1848 {
1849 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1850 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1851 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1852 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1853 }
1854
1855 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE)
1856 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1857
1858 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SYSENTER_MSR_MASK
1859 | HM_CHANGED_GUEST_EFER_MSR
1860 | HM_CHANGED_GUEST_SYSCALL_MSRS
1861 | HM_CHANGED_GUEST_KERNEL_GS_BASE);
1862
1863 /*
1864 * Setup the PAT MSR (applicable for Nested Paging only).
1865 *
1866 * The default value should be MSR_IA32_CR_PAT_INIT_VAL, but we treat all guest memory
1867 * as WB, so choose type 6 for all PAT slots, see @bugref{9634}.
1868 *
1869 * While guests can modify and see the modified values through the shadow values,
1870 * we shall not honor any guest modifications of this MSR to ensure caching is always
1871 * enabled similar to how we clear CR0.CD and NW bits.
1872 *
1873 * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
1874 */
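    /* Each of the 8 byte-sized PAT entries below is 0x06, i.e. memory type 6 (write-back). */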
1875 pVmcb->guest.u64PAT = UINT64_C(0x0006060606060606);
1876
1877 /* Enable the last branch record bit if LBR virtualization is enabled. */
1878 if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
1879 pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
1880}
1881
1882
1883/**
1884 * Exports the guest (or nested-guest) debug state into the VMCB and programs
1885 * the necessary intercepts accordingly.
1886 *
1887 * @param pVCpu The cross context virtual CPU structure.
1888 * @param pVmcb Pointer to the VM control block.
1889 *
1890 * @remarks No-long-jump zone!!!
1891 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1892 */
1893static void hmR0SvmExportSharedDebugState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1894{
1895 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1896
1897 /** @todo Figure out stepping with nested-guest. */
1898 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1899 {
1900 /*
1901 * We don't want to always intercept DRx read/writes for nested-guests as it causes
1902 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
1903 * Instead, they are strictly only requested when the nested hypervisor intercepts
1904 * them -- handled while merging VMCB controls.
1905 *
1906 * If neither the outer nor the nested-hypervisor is intercepting DRx read/writes,
1907 * then the nested-guest debug state should be actively loaded on the host so that
1908 * nested-guest reads/writes its own debug registers without causing VM-exits.
1909 */
1910 if ( ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1911 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1912 && !CPUMIsGuestDebugStateActive(pVCpu))
1913 {
1914 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
1915 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1916 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1917 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1918 }
1919
1920 pVmcb->guest.u64DR6 = pCtx->dr[6];
1921 pVmcb->guest.u64DR7 = pCtx->dr[7];
1922 return;
1923 }
1924
1925 /*
1926 * Anyone single stepping on the host side? If so, we'll have to use the
1927 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1928     * the VMM level like the VT-x implementation does.
1929 */
1930 bool fInterceptMovDRx = false;
1931 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1932 if (fStepping)
1933 {
1934 pVCpu->hmr0.s.fClearTrapFlag = true;
1935 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1936 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1937 }
1938
1939 if ( fStepping
1940 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1941 {
1942 /*
1943 * Use the combined guest and host DRx values found in the hypervisor
1944 * register set because the debugger has breakpoints active or someone
1945 * is single stepping on the host side.
1946 *
1947 * Note! DBGF expects a clean DR6 state before executing guest code.
1948 */
1949 if (!CPUMIsHyperDebugStateActive(pVCpu))
1950 {
1951 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1952 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1953 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1954 }
1955
1956 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1957 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1958 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1959 {
1960 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1961 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1962 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1963 }
1964
1965 /** @todo If we cared, we could optimize to allow the guest to read registers
1966 * with the same values. */
1967 fInterceptMovDRx = true;
1968 pVCpu->hmr0.s.fUsingHyperDR7 = true;
1969 Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
1970 }
1971 else
1972 {
1973 /*
1974 * Update DR6, DR7 with the guest values if necessary.
1975 */
1976 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1977 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1978 {
1979 pVmcb->guest.u64DR7 = pCtx->dr[7];
1980 pVmcb->guest.u64DR6 = pCtx->dr[6];
1981 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1982 }
1983 pVCpu->hmr0.s.fUsingHyperDR7 = false;
1984
1985 /*
1986 * If the guest has enabled debug registers, we need to load them prior to
1987 * executing guest code so they'll trigger at the right time.
1988 */
1989 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1990 {
1991 if (!CPUMIsGuestDebugStateActive(pVCpu))
1992 {
1993 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1994 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1995 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1996 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1997 }
1998 Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n"));
1999 }
2000 /*
2001         * If no debugging is enabled, we'll lazily load DR0-3. We don't need to
2002 * intercept #DB as DR6 is updated in the VMCB.
2003 *
2004 * Note! If we cared and dared, we could skip intercepting \#DB here.
2005 * However, \#DB shouldn't be performance critical, so we'll play safe
2006 * and keep the code similar to the VT-x code and always intercept it.
2007 */
2008 else if (!CPUMIsGuestDebugStateActive(pVCpu))
2009 fInterceptMovDRx = true;
2010 }
2011
2012 Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
2013 if (fInterceptMovDRx)
2014 {
2015 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
2016 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
2017 {
2018 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
2019 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
2020 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2021 }
2022 }
2023 else
2024 {
2025 if ( pVmcb->ctrl.u16InterceptRdDRx
2026 || pVmcb->ctrl.u16InterceptWrDRx)
2027 {
2028 pVmcb->ctrl.u16InterceptRdDRx = 0;
2029 pVmcb->ctrl.u16InterceptWrDRx = 0;
2030 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2031 }
2032 }
2033 Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
2034}
2035
2036/**
2037 * Exports the guest (or nested-guest) hardware virtualization state into the
2038 * VMCB.
2039 *
2040 * @param pVCpu The cross context virtual CPU structure.
2041 * @param pVmcb Pointer to the VM control block.
2042 *
2043 * @remarks No-long-jump zone!!!
2044 */
2045static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2046{
2047 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2048
2049 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
2050 {
2051 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
2052 {
2053 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2054 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2055
2056 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); /* Nested VGIF is not supported yet. */
2057 Assert(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF); /* Physical hardware supports VGIF. */
2058 Assert(HMIsSvmVGifActive(pVM)); /* Outer VM has enabled VGIF. */
2059 NOREF(pVM);
2060
2061 pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
2062 }
2063
2064 /*
2065 * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
2066 * since SVM doesn't have a preemption timer.
2067 *
2068 * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
2069 * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
2070 * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
2071 */
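        /* Illustration (made-up numbers): if the outer VM is configured with a pause-filter count of 3000 but the
           nested hypervisor programs 0x8000, the clamping below limits it to 3000. */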
2072 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2073 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2074 uint16_t const uGuestPauseFilterCount = pVM->hm.s.svm.cPauseFilter;
2075 uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
2076 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
2077 {
2078 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2079 pVmcbCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
2080 pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
2081 }
2082 else
2083 {
2084 /** @todo r=ramshankar: We can turn these assignments into assertions. */
2085 pVmcbCtrl->u16PauseFilterCount = uGuestPauseFilterCount;
2086 pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
2087 }
2088 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2089
2090 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
2091 }
2092}
2093
2094
2095/**
2096 * Exports the guest APIC TPR state into the VMCB.
2097 *
2098 * @returns VBox status code.
2099 * @param pVCpu The cross context virtual CPU structure.
2100 * @param pVmcb Pointer to the VM control block.
2101 */
2102static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2103{
2104 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2105
2106 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
2107 {
2108 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2109 if ( PDMHasApic(pVM)
2110 && APICIsEnabled(pVCpu))
2111 {
2112 bool fPendingIntr;
2113 uint8_t u8Tpr;
2114 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
2115 AssertRCReturn(rc, rc);
2116
2117 /* Assume that we need to trap all TPR accesses and thus need not check on
2118 every #VMEXIT if we should update the TPR. */
2119 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
2120 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2121
2122 if (!pVM->hm.s.fTprPatchingActive)
2123 {
2124 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
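                /* Example: a guest TPR of 0xC0 (task-priority class 12) is thus exposed as VTPR 0xC. */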
2125 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
2126
2127 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we
2128 can deliver the interrupt to the guest. */
2129 if (fPendingIntr)
2130 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
2131 else
2132 {
2133 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
2134 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2135 }
2136
2137 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
2138 }
2139 else
2140 {
2141                /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2142 pVmcb->guest.u64LSTAR = u8Tpr;
2143 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2144
2145 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
2146 if (fPendingIntr)
2147 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
2148 else
2149 {
2150 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
2151 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2152 }
2153 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
2154 }
2155 }
2156 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
2157 }
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/**
2163 * Sets up the exception intercepts required for guest execution in the VMCB.
2164 *
2165 * @param pVCpu The cross context virtual CPU structure.
2166 * @param pVmcb Pointer to the VM control block.
2167 *
2168 * @remarks No-long-jump zone!!!
2169 */
2170static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2171{
2172 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2173
2174 /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
2175 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
2176 {
2177 /* Trap #UD for GIM provider (e.g. for hypercalls). */
2178 if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
2179 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
2180 else
2181 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD);
2182
2183 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
2184 if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
2185 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP);
2186 else
2187 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP);
2188
2189 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
2190 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
2191 }
2192}
2193
2194
2195#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2196/**
2197 * Merges guest and nested-guest intercepts for executing the nested-guest using
2198 * hardware-assisted SVM.
2199 *
2200 * This merges the guest and nested-guest intercepts in such a way that anything
2201 * the outer guest intercepts is also intercepted while executing the
2202 * nested-guest.
2203 *
2204 * @param pVCpu The cross context virtual CPU structure.
2206 */
2207static void hmR0SvmMergeVmcbCtrlsNested(PVMCPUCC pVCpu)
2208{
2209 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2210 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
2211 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2212 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2213
2214 /* Merge the guest's CR intercepts into the nested-guest VMCB. */
2215 pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
2216 pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
2217
2218 /* Always intercept CR4 writes for tracking PGM mode changes and AVX (for
2219       XCR0 syncing during world switching). */
2220 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
2221
2222 /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
2223 if (!pVM->hmr0.s.fNestedPaging)
2224 {
2225 pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
2226 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
2227 }
2228
2229 /* Merge the guest's DR intercepts into the nested-guest VMCB. */
2230 pVmcbNstGstCtrl->u16InterceptRdDRx |= pVmcb->ctrl.u16InterceptRdDRx;
2231 pVmcbNstGstCtrl->u16InterceptWrDRx |= pVmcb->ctrl.u16InterceptWrDRx;
2232
2233 /*
2234 * Merge the guest's exception intercepts into the nested-guest VMCB.
2235 *
2236 * - #UD: Exclude these as the outer guest's GIM hypercalls are not applicable
2237 * while executing the nested-guest.
2238 *
2239 * - #BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
2240 * be tweaked later depending on how we wish to implement breakpoints.
2241 *
2242     * - #GP: Exclude these as it's the inner VMM's problem to get vmsvga 3d drivers
2243 * loaded into their guests, not ours.
2244 *
2245 * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
2246 * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts().
2247 */
2248#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
2249 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt
2250 & ~( RT_BIT(X86_XCPT_UD)
2251 | RT_BIT(X86_XCPT_BP)
2252 | (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv ? RT_BIT(X86_XCPT_GP) : 0));
2253#else
2254 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
2255#endif
2256
2257 /*
2258 * Adjust intercepts while executing the nested-guest that differ from the
2259 * outer guest intercepts.
2260 *
2261 * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
2262 * that belong to the nested-guest to the outer guest.
2263 *
2264 * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
2265 * the nested-guest, the physical CPU raises a \#UD exception as expected.
2266 */
2267 pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~( SVM_CTRL_INTERCEPT_VINTR
2268 | SVM_CTRL_INTERCEPT_VMMCALL))
2269 | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
2270
2271 Assert( (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
2272 == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
2273
2274 /* Finally, update the VMCB clean bits. */
2275 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2276}
2277#endif
2278
2279
2280/**
2281 * Enters the AMD-V session.
2282 *
2283 * @returns VBox status code.
2284 * @param pVCpu The cross context virtual CPU structure.
2285 */
2286VMMR0DECL(int) SVMR0Enter(PVMCPUCC pVCpu)
2287{
2288 AssertPtr(pVCpu);
2289 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
2290 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2291
2292 LogFlowFunc(("pVCpu=%p\n", pVCpu));
2293 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2294 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2295
2296 pVCpu->hmr0.s.fLeaveDone = false;
2297 return VINF_SUCCESS;
2298}
2299
2300
2301/**
2302 * Thread-context callback for AMD-V.
2303 *
2304 * This is used together with RTThreadCtxHookCreate() on platforms which
2305 * supports it, and directly from VMMR0EmtPrepareForBlocking() and
2306 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
2307 *
2308 * @param enmEvent The thread-context event.
2309 * @param pVCpu The cross context virtual CPU structure.
2310 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
2311 * @thread EMT(pVCpu)
2312 */
2313VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
2314{
2315 NOREF(fGlobalInit);
2316
2317 switch (enmEvent)
2318 {
2319 case RTTHREADCTXEVENT_OUT:
2320 {
2321 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2322 VMCPU_ASSERT_EMT(pVCpu);
2323
2324 /* No longjmps (log-flush, locks) in this fragile context. */
2325 VMMRZCallRing3Disable(pVCpu);
2326
2327 if (!pVCpu->hmr0.s.fLeaveDone)
2328 {
2329 hmR0SvmLeave(pVCpu, false /* fImportState */);
2330 pVCpu->hmr0.s.fLeaveDone = true;
2331 }
2332
2333 /* Leave HM context, takes care of local init (term). */
2334 int rc = HMR0LeaveCpu(pVCpu);
2335 AssertRC(rc); NOREF(rc);
2336
2337 /* Restore longjmp state. */
2338 VMMRZCallRing3Enable(pVCpu);
2339 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
2340 break;
2341 }
2342
2343 case RTTHREADCTXEVENT_IN:
2344 {
2345 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2346 VMCPU_ASSERT_EMT(pVCpu);
2347
2348 /* No longjmps (log-flush, locks) in this fragile context. */
2349 VMMRZCallRing3Disable(pVCpu);
2350
2351 /*
2352 * Initialize the bare minimum state required for HM. This takes care of
2353 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
2354 */
2355 int rc = hmR0EnterCpu(pVCpu);
2356 AssertRC(rc); NOREF(rc);
2357 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2358 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2359
2360 pVCpu->hmr0.s.fLeaveDone = false;
2361
2362 /* Restore longjmp state. */
2363 VMMRZCallRing3Enable(pVCpu);
2364 break;
2365 }
2366
2367 default:
2368 break;
2369 }
2370}
2371
2372
2373/**
2374 * Saves the host state.
2375 *
2376 * @returns VBox status code.
2377 * @param pVCpu The cross context virtual CPU structure.
2378 *
2379 * @remarks No-long-jump zone!!!
2380 */
2381VMMR0DECL(int) SVMR0ExportHostState(PVMCPUCC pVCpu)
2382{
2383 NOREF(pVCpu);
2384
2385 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
2386 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT);
2387 return VINF_SUCCESS;
2388}
2389
2390
2391/**
2392 * Exports the guest or nested-guest state from the virtual-CPU context into the
2393 * VMCB.
2394 *
2395 * Also sets up the appropriate VMRUN function to execute guest or nested-guest
2396 * code based on the virtual-CPU mode.
2397 *
2398 * @returns VBox status code.
2399 * @param pVCpu The cross context virtual CPU structure.
2400 * @param pSvmTransient Pointer to the SVM-transient structure.
2401 *
2402 * @remarks No-long-jump zone!!!
2403 */
2404static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
2405{
2406 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
2407
2408 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2409 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2410 Assert(pVmcb);
2411
2412 pVmcb->guest.u64RIP = pCtx->rip;
2413 pVmcb->guest.u64RSP = pCtx->rsp;
2414 pVmcb->guest.u64RFlags = pCtx->eflags.u;
2415 pVmcb->guest.u64RAX = pCtx->rax;
2416
2417 bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
2418 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2419
2420 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
2421 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
2422 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
2423 hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
2424 hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
2425
2426 ASMSetFlags(fEFlags);
2427
2428 if (!fIsNestedGuest)
2429 {
2430        /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as
2431 otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
2432 hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
2433 hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
2434 }
2435
2436    /* Clear any bits from fCtxChanged that are either exported unconditionally or unused/reserved. */
2437 uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
2438 | HM_CHANGED_GUEST_RFLAGS
2439 | HM_CHANGED_GUEST_GPRS_MASK
2440 | HM_CHANGED_GUEST_X87
2441 | HM_CHANGED_GUEST_SSE_AVX
2442 | HM_CHANGED_GUEST_OTHER_XSAVE
2443 | HM_CHANGED_GUEST_XCRx
2444 | HM_CHANGED_GUEST_TSC_AUX
2445 | HM_CHANGED_GUEST_OTHER_MSRS;
2446 if (fIsNestedGuest)
2447 fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
2448 | HM_CHANGED_GUEST_APIC_TPR;
2449
2450 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( fUnusedMask
2451 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
2452
2453#ifdef VBOX_STRICT
2454 /*
2455 * All of the guest-CPU state and SVM keeper bits should be exported here by now,
2456 * except for the host-context and/or shared host-guest context bits.
2457 */
2458 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
2459 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
2460 ("fCtxChanged=%#RX64\n", fCtxChanged));
2461
2462 /*
2463     * If we need to log state that isn't always imported, we'll need to import it here.
2464     * See hmR0SvmPostRunGuest() for which part of the state is imported unconditionally.
2465 */
2466 hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */);
2467#endif
2468
2469 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
2470 return VINF_SUCCESS;
2471}
2472
2473#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2474
2475/**
2476 * Merges the guest and nested-guest MSR permission bitmap.
2477 *
2478 * If the guest is intercepting an MSR we need to intercept it regardless of
2479 * whether the nested-guest is intercepting it or not.
2480 *
2481 * @param pHostCpu The HM physical-CPU structure.
2482 * @param pVCpu The cross context virtual CPU structure.
2483 *
2484 * @remarks No-long-jmp zone!!!
2485 */
2486DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
2487{
2488 uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2489 uint64_t const *pu64NstGstMsrpm = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[0];
2490 uint64_t *pu64DstMsrpm = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
2491
2492 /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
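    /* Each MSR occupies 2 bits in the bitmap (one read, one write intercept), so OR'ing whole qwords below
       ORs the per-MSR intercept bits of the guest and the nested-guest. */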
2493 uint32_t const offRsvdQwords = 0x1800 >> 3;
2494 for (uint32_t i = 0; i < offRsvdQwords; i++)
2495 pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
2496}
2497
2498
2499/**
2500 * Caches the nested-guest VMCB fields before we modify them for execution using
2501 * hardware-assisted SVM.
2502 *
2503 * @returns true if the VMCB was previously already cached, false otherwise.
2504 * @param pVCpu The cross context virtual CPU structure.
2505 *
2506 * @sa HMNotifySvmNstGstVmexit.
2507 */
2508static bool hmR0SvmCacheVmcbNested(PVMCPUCC pVCpu)
2509{
2510 /*
2511 * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
2512 * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
2513 *
2514 * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
2515 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
2516 */
2517 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2518 bool const fWasCached = pVmcbNstGstCache->fCacheValid;
2519 if (!fWasCached)
2520 {
2521 PCSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2522 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2523 pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
2524 pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
2525 pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
2526 pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
2527 pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
2528 pVmcbNstGstCache->u16PauseFilterCount = pVmcbNstGstCtrl->u16PauseFilterCount;
2529 pVmcbNstGstCache->u32InterceptXcpt = pVmcbNstGstCtrl->u32InterceptXcpt;
2530 pVmcbNstGstCache->u64InterceptCtrl = pVmcbNstGstCtrl->u64InterceptCtrl;
2531 pVmcbNstGstCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
2532 pVmcbNstGstCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
2533 pVmcbNstGstCache->fNestedPaging = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
2534 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
2535 pVmcbNstGstCache->fCacheValid = true;
2536 Log4Func(("Cached VMCB fields\n"));
2537 }
2538
2539 return fWasCached;
2540}
2541
2542
2543/**
2544 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
2545 *
2546 * This is done the first time we enter nested-guest execution using SVM R0
2547 * until the nested-guest \#VMEXIT (not to be confused with physical CPU
2548 * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
2549 *
2550 * @param pVCpu The cross context virtual CPU structure.
2551 */
2552static void hmR0SvmSetupVmcbNested(PVMCPUCC pVCpu)
2553{
2554 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2555 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2556
2557 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2558
2559 /*
2560 * First cache the nested-guest VMCB fields we may potentially modify.
2561 */
2562 bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
2563 if (!fVmcbCached)
2564 {
2565 /*
2566         * The IOPM of the nested-guest can be ignored because the guest always
2567 * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
2568 * than the nested-guest IOPM and swap the field back on the #VMEXIT.
2569 */
2570 pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
2571
2572 /*
2573 * Use the same nested-paging as the outer guest. We can't dynamically switch off
2574 * nested-paging suddenly while executing a VM (see assertion at the end of
2575 * Trap0eHandler() in PGMAllBth.h).
2576 */
2577 pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
2578
2579 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
2580 pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;
2581
2582 /*
2583 * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject
2584 * to the nested-guest intercepts and we always run with V_INTR_MASKING.
2585 */
2586 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2587
2588# ifdef DEBUG_ramshankar
2589 /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
2590        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVCpu->hmr0.s.svm.pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
2591# endif
2592
2593 /*
2594         * If we don't expose the Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
2595 * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
2596 */
2597 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
2598 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
2599 | SVM_CTRL_INTERCEPT_VMLOAD;
2600
2601 /*
2602         * If we don't expose the Virtual GIF feature to the outer guest, we need to intercept
2603 * CLGI/STGI instructions executed by the nested-guest.
2604 */
2605 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
2606 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
2607 | SVM_CTRL_INTERCEPT_STGI;
2608
2609 /* Merge the guest and nested-guest intercepts. */
2610 hmR0SvmMergeVmcbCtrlsNested(pVCpu);
2611
2612 /* Update the VMCB clean bits. */
2613 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2614 }
2615 else
2616 {
2617 Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
2618 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
2619 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2620 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPagingCfg == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2621 }
2622}
2623
2624#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
2625
2626/**
2627 * Exports the state shared between the host and guest (or nested-guest) into
2628 * the VMCB.
2629 *
2630 * @param pVCpu The cross context virtual CPU structure.
2631 * @param pVmcb Pointer to the VM control block.
2632 *
2633 * @remarks No-long-jump zone!!!
2634 */
2635static void hmR0SvmExportSharedState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2636{
2637 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2638 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2639
2640 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
2641 hmR0SvmExportSharedDebugState(pVCpu, pVmcb);
2642
2643 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
2644 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE),
2645 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
2646}
2647
2648
2649/**
2650 * Worker for SVMR0ImportStateOnDemand.
2651 *
2652 * @param pVCpu The cross context virtual CPU structure.
2653 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2654 */
2655static void hmR0SvmImportGuestState(PVMCPUCC pVCpu, uint64_t fWhat)
2656{
2657 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
2658
2659 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2660 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2661 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
2662 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2663
2664 /*
2665 * We disable interrupts to make the updating of the state and in particular
2666     * the fExtrn modification atomic with respect to preemption hooks.
2667 */
2668 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2669
2670 fWhat &= pCtx->fExtrn;
2671 if (fWhat)
2672 {
2673#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2674 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
2675 {
2676 if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
2677 {
2678 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); /* We don't yet support passing VGIF feature to the guest. */
2679 Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM))); /* VM has configured it. */
2680 CPUMSetGuestGif(pCtx, pVmcbCtrl->IntCtrl.n.u1VGif);
2681 }
2682 }
2683
2684 if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
2685 {
2686 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending
2687 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
2688 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2689 }
2690#endif
2691
2692 if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
2693 CPUMUpdateInterruptShadowEx(pCtx, pVmcbCtrl->IntShadow.n.u1IntShadow, pVmcbGuest->u64RIP);
2694
2695 if (fWhat & CPUMCTX_EXTRN_RIP)
2696 pCtx->rip = pVmcbGuest->u64RIP;
2697
2698 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2699 {
2700 pCtx->eflags.u = pVmcbGuest->u64RFlags;
2701 if (pVCpu->hmr0.s.fClearTrapFlag)
2702 {
2703 pVCpu->hmr0.s.fClearTrapFlag = false;
2704 pCtx->eflags.Bits.u1TF = 0;
2705 }
2706 }
2707
2708 if (fWhat & CPUMCTX_EXTRN_RSP)
2709 pCtx->rsp = pVmcbGuest->u64RSP;
2710
2711 if (fWhat & CPUMCTX_EXTRN_RAX)
2712 pCtx->rax = pVmcbGuest->u64RAX;
2713
2714 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2715 {
2716 if (fWhat & CPUMCTX_EXTRN_CS)
2717 {
2718 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
2719 /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */
2720 /** @todo SELM might need to be fixed as it too should not care about the
2721 * granularity bit. See @bugref{6785}. */
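                /* E.g. a flat 4 GiB code segment is reported with u32Limit=0xffffffff, a limit that is only
                   representable with the granularity bit set. */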
2722 if ( !pCtx->cs.Attr.n.u1Granularity
2723 && pCtx->cs.Attr.n.u1Present
2724 && pCtx->cs.u32Limit > UINT32_C(0xfffff))
2725 {
2726 Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
2727 pCtx->cs.Attr.n.u1Granularity = 1;
2728 }
2729 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
2730 }
2731 if (fWhat & CPUMCTX_EXTRN_SS)
2732 {
2733 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
2734 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
2735 /*
2736                 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the
2737                 * VMCB and use that, so it's possible that the SS DPL isn't updated by AMD-V
2738                 * when the CPL changes during guest execution. Observed on some
2739 * AMD Fusion CPUs with 64-bit guests.
2740 *
2741 * See AMD spec. 15.5.1 "Basic operation".
2742 */
2743 Assert(!(pVmcbGuest->u8CPL & ~0x3));
2744 uint8_t const uCpl = pVmcbGuest->u8CPL;
2745 if (pCtx->ss.Attr.n.u2Dpl != uCpl)
2746 pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
2747 }
2748 if (fWhat & CPUMCTX_EXTRN_DS)
2749 {
2750 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
2751 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
2752 }
2753 if (fWhat & CPUMCTX_EXTRN_ES)
2754 {
2755 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
2756 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
2757 }
2758 if (fWhat & CPUMCTX_EXTRN_FS)
2759 {
2760 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
2761 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
2762 }
2763 if (fWhat & CPUMCTX_EXTRN_GS)
2764 {
2765 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
2766 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
2767 }
2768 }
2769
2770 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2771 {
2772 if (fWhat & CPUMCTX_EXTRN_TR)
2773 {
2774 /*
2775                 * Fixup TR attributes so they're compatible with Intel. Important when saved-states
2776 * are used between Intel and AMD, see @bugref{6208#c39}.
2777 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
2778 */
2779 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
2780 if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2781 {
2782 if ( pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2783 || CPUMIsGuestInLongModeEx(pCtx))
2784 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2785 else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
2786 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2787 }
2788 }
2789
2790 if (fWhat & CPUMCTX_EXTRN_LDTR)
2791 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
2792
2793 if (fWhat & CPUMCTX_EXTRN_GDTR)
2794 {
2795 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
2796 pCtx->gdtr.pGdt = pVmcbGuest->GDTR.u64Base;
2797 }
2798
2799 if (fWhat & CPUMCTX_EXTRN_IDTR)
2800 {
2801 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
2802 pCtx->idtr.pIdt = pVmcbGuest->IDTR.u64Base;
2803 }
2804 }
2805
2806 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2807 {
2808 pCtx->msrSTAR = pVmcbGuest->u64STAR;
2809 pCtx->msrLSTAR = pVmcbGuest->u64LSTAR;
2810 pCtx->msrCSTAR = pVmcbGuest->u64CSTAR;
2811 pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
2812 }
2813
2814 if ( (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2815 && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
2816 {
2817 pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS;
2818 pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
2819 pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
2820 }
2821
2822 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2823 pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
2824
2825 if (fWhat & CPUMCTX_EXTRN_DR_MASK)
2826 {
2827 if (fWhat & CPUMCTX_EXTRN_DR6)
2828 {
2829 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2830 pCtx->dr[6] = pVmcbGuest->u64DR6;
2831 else
2832 CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
2833 }
2834
2835 if (fWhat & CPUMCTX_EXTRN_DR7)
2836 {
2837 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2838 pCtx->dr[7] = pVmcbGuest->u64DR7;
2839 else
2840 Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
2841 }
2842 }
2843
2844 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2845 {
2846 if (fWhat & CPUMCTX_EXTRN_CR0)
2847 {
2848 /* We intercept changes to all CR0 bits except maybe TS & MP bits. */
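                /* I.e. keep the context values for the intercepted bits and take TS & MP (which the guest may
                   have changed without causing a #VMEXIT) from the VMCB. */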
2849 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
2850 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP));
2851 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
2852 CPUMSetGuestCR0(pVCpu, uCr0);
2853 VMMRZCallRing3Enable(pVCpu);
2854 }
2855
2856 if (fWhat & CPUMCTX_EXTRN_CR2)
2857 pCtx->cr2 = pVmcbGuest->u64CR2;
2858
2859 if (fWhat & CPUMCTX_EXTRN_CR3)
2860 {
2861 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
2862 && pCtx->cr3 != pVmcbGuest->u64CR3)
2863 {
2864 CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
2865 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2866 }
2867 }
2868
2869 /* Changes to CR4 are always intercepted. */
2870 }
2871
2872 /* Update fExtrn. */
2873 pCtx->fExtrn &= ~fWhat;
2874
2875 /* If everything has been imported, clear the HM keeper bit. */
2876 if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
2877 {
2878 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
2879 Assert(!pCtx->fExtrn);
2880 }
2881 }
2882 else
2883 Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
2884
2885 ASMSetFlags(fEFlags);
2886
2887 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
2888
2889 /*
2890 * Honor any pending CR3 updates.
2891 *
2892 * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
2893 * -> SVMR0CallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
2894 * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
2895 * handling -> hmR0SvmImportGuestState() and here we are.
2896 *
2897     * The reason for such complicated handling is that VM-exits that call into PGM expect
2898 * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've
2899 * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0
2900 * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3().
2901 *
2902 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again,
2903     * nor does it process force-flags like regular exits to ring-3 do, so we cover for it here.
2904 */
2905 if ( VMMRZCallRing3IsEnabled(pVCpu)
2906 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
2907 {
2908 AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
2909 PGMUpdateCR3(pVCpu, pCtx->cr3);
2910 }
2911}
2912
2913
2914/**
2915 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
2916 * context.
2917 *
2918 * Currently there is no residual state left in the CPU that is not updated in the
2919 * VMCB.
2920 *
2921 * @returns VBox status code.
2922 * @param pVCpu The cross context virtual CPU structure.
2923 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2924 */
2925VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2926{
2927 hmR0SvmImportGuestState(pVCpu, fWhat);
2928 return VINF_SUCCESS;
2929}
2930
2931
2932/**
2933 * Gets SVM \#VMEXIT auxiliary information.
2934 *
2935 * @returns VBox status code.
2936 * @param pVCpu The cross context virtual CPU structure.
2937 * @param pSvmExitAux Where to store the auxiliary info.
2938 */
2939VMMR0DECL(int) SVMR0GetExitAuxInfo(PVMCPUCC pVCpu, PSVMEXITAUX pSvmExitAux)
2940{
2941 PCSVMTRANSIENT pSvmTransient = pVCpu->hmr0.s.svm.pSvmTransient;
2942 if (RT_LIKELY(pSvmTransient))
2943 {
2944 PCSVMVMCB pVmcb = pSvmTransient->pVmcb;
2945 if (RT_LIKELY(pVmcb))
2946 {
2947 pSvmExitAux->u64ExitCode = pVmcb->ctrl.u64ExitCode;
2948 pSvmExitAux->u64ExitInfo1 = pVmcb->ctrl.u64ExitInfo1;
2949 pSvmExitAux->u64ExitInfo2 = pVmcb->ctrl.u64ExitInfo2;
2950 pSvmExitAux->ExitIntInfo = pVmcb->ctrl.ExitIntInfo;
2951 return VINF_SUCCESS;
2952 }
2953 return VERR_SVM_IPE_5;
2954 }
2955 return VERR_NOT_AVAILABLE;
2956}
2957
2958
2959/**
2960 * Does the necessary state syncing before returning to ring-3 for any reason
2961 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2962 *
2963 * @param pVCpu The cross context virtual CPU structure.
2964 * @param fImportState Whether to import the guest state from the VMCB back
2965 * to the guest-CPU context.
2966 *
2967 * @remarks No-long-jmp zone!!!
2968 */
2969static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState)
2970{
2971 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2972 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2973
2974 /*
2975 * !!! IMPORTANT !!!
2976 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
2977 */
2978
2979 /* Save the guest state if necessary. */
2980 if (fImportState)
2981 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
2982
2983 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2984 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
2985 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2986
2987 /*
2988 * Restore host debug registers if necessary and resync on next R0 reentry.
2989 */
2990#ifdef VBOX_STRICT
2991 if (CPUMIsHyperDebugStateActive(pVCpu))
2992 {
2993 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb; /** @todo nested-guest. */
2994 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2995 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2996 }
2997#endif
2998 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2999 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
3000 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
3001
3002 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
3003 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
3004 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
3005 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
3006 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
3007 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
3008 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3009
3010 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
3011}
3012
3013
3014/**
3015 * Leaves the AMD-V session.
3016 *
3017 * Only used while returning to ring-3 either due to longjump or exits to
3018 * ring-3.
3019 *
3020 * @returns VBox status code.
3021 * @param pVCpu The cross context virtual CPU structure.
3022 */
3023static int hmR0SvmLeaveSession(PVMCPUCC pVCpu)
3024{
3025 HM_DISABLE_PREEMPT(pVCpu);
3026 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3027 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3028
3029 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
3030 and done this from the SVMR0ThreadCtxCallback(). */
3031 if (!pVCpu->hmr0.s.fLeaveDone)
3032 {
3033 hmR0SvmLeave(pVCpu, true /* fImportState */);
3034 pVCpu->hmr0.s.fLeaveDone = true;
3035 }
3036
3037 /*
3038 * !!! IMPORTANT !!!
3039 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
3040 */
3041
3042 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3043 /* Deregister hook now that we've left HM context before re-enabling preemption. */
3044 VMMR0ThreadCtxHookDisable(pVCpu);
3045
3046 /* Leave HM context. This takes care of local init (term). */
3047 int rc = HMR0LeaveCpu(pVCpu);
3048
3049 HM_RESTORE_PREEMPT();
3050 return rc;
3051}
3052
3053
3054/**
3055 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
3056 * any remaining host state) before we go back to ring-3 due to an assertion.
3057 *
3058 * @param pVCpu The cross context virtual CPU structure.
3059 */
3060VMMR0DECL(int) SVMR0AssertionCallback(PVMCPUCC pVCpu)
3061{
3062 /*
3063 * !!! IMPORTANT !!!
3064 * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
3065 * to be updated too. This is a stripped down version which gets out ASAP, trying not to trigger any assertion.
3066 */
3067 VMMR0AssertionRemoveNotification(pVCpu);
3068 VMMRZCallRing3Disable(pVCpu);
3069 HM_DISABLE_PREEMPT(pVCpu);
3070
3071 /* Import the entire guest state. */
3072 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3073
3074 /* Restore host FPU state if necessary and resync on next R0 reentry. */
3075 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
3076
3077 /* Restore host debug registers if necessary and resync on next R0 reentry. */
3078 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
3079
3080 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
3081 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3082 VMMR0ThreadCtxHookDisable(pVCpu);
3083
3084 /* Leave HM context. This takes care of local init (term). */
3085 HMR0LeaveCpu(pVCpu);
3086
3087 HM_RESTORE_PREEMPT();
3088 return VINF_SUCCESS;
3089}
3090
3091
3092/**
3093 * Take necessary actions before going back to ring-3.
3094 *
3095 * An action requires us to go back to ring-3. This function does the necessary
3096 * steps before we can safely return to ring-3. This is not the same as a longjmp
3097 * to ring-3; this is a voluntary exit.
3098 *
3099 * @returns Strict VBox status code.
3100 * @param pVCpu The cross context virtual CPU structure.
3101 * @param rcExit The reason for exiting to ring-3. Can be
3102 * VINF_VMM_UNKNOWN_RING3_CALL.
3103 */
3104static VBOXSTRICTRC hmR0SvmExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
3105{
3106 Assert(pVCpu);
3107 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3108
3109 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
3110 VMMRZCallRing3Disable(pVCpu);
3111 Log4Func(("rcExit=%d LocalFF=%#RX64 GlobalFF=%#RX32\n", VBOXSTRICTRC_VAL(rcExit), (uint64_t)pVCpu->fLocalForcedActions,
3112 pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
3113
3114 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
3115 if (pVCpu->hm.s.Event.fPending)
3116 {
3117 hmR0SvmPendingEventToTrpmTrap(pVCpu);
3118 Assert(!pVCpu->hm.s.Event.fPending);
3119 }
3120
3121 /* Sync. the necessary state for going back to ring-3. */
3122 hmR0SvmLeaveSession(pVCpu);
3123 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3124
3125 /* Thread-context hooks are unregistered at this point!!! */
3126 /* Ring-3 callback notifications are unregistered at this point!!! */
3127
3128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
3129 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
3130 | CPUM_CHANGED_LDTR
3131 | CPUM_CHANGED_GDTR
3132 | CPUM_CHANGED_IDTR
3133 | CPUM_CHANGED_TR
3134 | CPUM_CHANGED_HIDDEN_SEL_REGS);
3135 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
3136 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
3137 {
3138 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3139 }
3140
3141 /* Update the exit-to-ring 3 reason. */
3142 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
3143
3144 /* On our way back from ring-3, reload the guest-CPU state if it may change while in ring-3. */
3145 if ( rcExit != VINF_EM_RAW_INTERRUPT
3146 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3147 {
3148 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
3149 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3150 }
3151
3152 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
3153 VMMRZCallRing3Enable(pVCpu);
3154
3155 /*
3156 * If we're emulating an instruction, we shouldn't have any TRPM traps pending
3157 * and if we're injecting an event we should have a TRPM trap pending.
3158 */
3159 AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
3160 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3161 VERR_SVM_IPE_5);
3162 AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
3163 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3164 VERR_SVM_IPE_4);
3165
3166 return rcExit;
3167}
3168
3169
3170/**
3171 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
3172 * intercepts.
3173 *
3174 * @param pVCpu The cross context virtual CPU structure.
3175 * @param pVmcb Pointer to the VM control block.
3176 *
3177 * @remarks No-long-jump zone!!!
3178 */
3179static void hmR0SvmUpdateTscOffsetting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3180{
3181 /*
3182 * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable,
3183 * and, in the case of a nested-guest, if the nested-guest VMCB specifies that it is
3184 * not intercepting RDTSC/RDTSCP either.
3185 */
3186 bool fParavirtTsc;
3187 uint64_t uTscOffset;
3188 bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
3189
3190 bool fIntercept;
3191 if (fCanUseRealTsc)
3192 fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3193 else
3194 {
3195 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3196 fIntercept = true;
3197 }
3198
3199 if (!fIntercept)
3200 {
3201#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3202 /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
3203 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3204 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
3205#endif
3206
3207 /* Update the TSC offset in the VMCB and the relevant clean bits. */
3208 pVmcb->ctrl.u64TSCOffset = uTscOffset;
3209 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
3210 }
3211
3212 /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
3213 information before every VM-entry, hence we have nothing to do here at the moment. */
3214 if (fParavirtTsc)
3215 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
3216}
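
/*
 * Note (illustrative only): with RDTSC/RDTSCP left unintercepted, the CPU itself computes
 * the value the guest reads as roughly:
 *
 *     uint64_t const uHostTsc  = ASMReadTSC();
 *     uint64_t const uGuestTsc = uHostTsc + pVmcb->ctrl.u64TSCOffset;
 *
 * i.e. the guest observes the host TSC plus the VMCB TSC offset set up above.
 */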
3217
3218
3219/**
3220 * Sets an event as a pending event to be injected into the guest.
3221 *
3222 * @param pVCpu The cross context virtual CPU structure.
3223 * @param pEvent Pointer to the SVM event.
3224 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3225 * page-fault.
3226 *
3227 * @remarks Statistics counter assumes this is a guest event being reflected to
3228 * the guest i.e. 'StatInjectPendingReflect' is incremented always.
3229 */
3230DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPUCC pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
3231{
3232 Assert(!pVCpu->hm.s.Event.fPending);
3233 Assert(pEvent->n.u1Valid);
3234
3235 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
3236 pVCpu->hm.s.Event.fPending = true;
3237 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
3238
3239 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3240 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3241}
3242
3243
3244/**
3245 * Sets a divide error (\#DE) exception as pending-for-injection into the VM.
3246 *
3247 * @param pVCpu The cross context virtual CPU structure.
3248 */
3249DECLINLINE(void) hmR0SvmSetPendingXcptDE(PVMCPUCC pVCpu)
3250{
3251 SVMEVENT Event;
3252 Event.u = 0;
3253 Event.n.u1Valid = 1;
3254 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3255 Event.n.u8Vector = X86_XCPT_DE;
3256 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3257}
3258
3259
3260/**
3261 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 */
3265DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPUCC pVCpu)
3266{
3267 SVMEVENT Event;
3268 Event.u = 0;
3269 Event.n.u1Valid = 1;
3270 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3271 Event.n.u8Vector = X86_XCPT_UD;
3272 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3273}
3274
3275
3276/**
3277 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3278 *
3279 * @param pVCpu The cross context virtual CPU structure.
3280 */
3281DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPUCC pVCpu)
3282{
3283 SVMEVENT Event;
3284 Event.u = 0;
3285 Event.n.u1Valid = 1;
3286 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3287 Event.n.u8Vector = X86_XCPT_DB;
3288 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3289}
3290
3291
3292/**
3293 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
3294 *
3295 * @param pVCpu The cross context virtual CPU structure.
3296 * @param u32ErrCode The error-code for the page-fault.
3297 * @param uFaultAddress The page fault address (CR2).
3298 *
3299 * @remarks This updates the guest CR2 with @a uFaultAddress!
3300 */
3301DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3302{
3303 SVMEVENT Event;
3304 Event.u = 0;
3305 Event.n.u1Valid = 1;
3306 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3307 Event.n.u8Vector = X86_XCPT_PF;
3308 Event.n.u1ErrorCodeValid = 1;
3309 Event.n.u32ErrorCode = u32ErrCode;
3310
3311 /* Update CR2 of the guest. */
3312 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
3313 if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
3314 {
3315 pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
3316 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
3317 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
3318 }
3319
3320 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3321}
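
/*
 * Illustrative usage sketch, not part of the build: queue a guest #PF for a write access.
 * The error code and fault address below are made-up examples.
 */
#if 0
hmR0SvmSetPendingXcptPF(pVCpu, X86_TRAP_PF_RW, UINT64_C(0x7fff00001000));
#endif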
3322
3323
3324/**
3325 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
3326 *
3327 * @param pVCpu The cross context virtual CPU structure.
3328 */
3329DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPUCC pVCpu)
3330{
3331 SVMEVENT Event;
3332 Event.u = 0;
3333 Event.n.u1Valid = 1;
3334 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3335 Event.n.u8Vector = X86_XCPT_MF;
3336 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3337}
3338
3339
3340/**
3341 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
3342 *
3343 * @param pVCpu The cross context virtual CPU structure.
3344 */
3345DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPUCC pVCpu)
3346{
3347 SVMEVENT Event;
3348 Event.u = 0;
3349 Event.n.u1Valid = 1;
3350 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3351 Event.n.u8Vector = X86_XCPT_DF;
3352 Event.n.u1ErrorCodeValid = 1;
3353 Event.n.u32ErrorCode = 0;
3354 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3355}
3356
3357
3358/**
3359 * Injects an event into the guest upon VMRUN by updating the relevant field
3360 * in the VMCB.
3361 *
3362 * @param pVCpu The cross context virtual CPU structure.
3363 * @param pVmcb Pointer to the guest VM control block.
3364 * @param pEvent Pointer to the event.
3365 *
3366 * @remarks No-long-jump zone!!!
3367 * @remarks Requires CR0!
3368 */
3369DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
3370{
3371 Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
3372 pVmcb->ctrl.EventInject.u = pEvent->u;
3373 if ( pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_EXCEPTION
3374 || pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_NMI)
3375 {
3376 Assert(pEvent->n.u8Vector <= X86_XCPT_LAST);
3377 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[pEvent->n.u8Vector]);
3378 }
3379 else
3380 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
3381 RT_NOREF(pVCpu);
3382
3383 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3384 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3385}
3386
3387
3388
3389/**
3390 * Converts any TRPM trap into a pending HM event. This is typically used when
3391 * entering from ring-3 (not longjmp returns).
3392 *
3393 * @param pVCpu The cross context virtual CPU structure.
3394 */
3395static void hmR0SvmTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3396{
3397 Assert(TRPMHasTrap(pVCpu));
3398 Assert(!pVCpu->hm.s.Event.fPending);
3399
3400 uint8_t uVector;
3401 TRPMEVENT enmTrpmEvent;
3402 uint32_t uErrCode;
3403 RTGCUINTPTR GCPtrFaultAddress;
3404 uint8_t cbInstr;
3405
3406 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, NULL /* pfIcebp */);
3407 AssertRC(rc);
3408
3409 SVMEVENT Event;
3410 Event.u = 0;
3411 Event.n.u1Valid = 1;
3412 Event.n.u8Vector = uVector;
3413
3414 /* Refer to AMD spec. 15.20 "Event Injection" for the format. */
3415 if (enmTrpmEvent == TRPM_TRAP)
3416 {
3417 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3418 switch (uVector)
3419 {
3420 case X86_XCPT_NMI:
3421 {
3422 Event.n.u3Type = SVM_EVENT_NMI;
3423 break;
3424 }
3425
3426 case X86_XCPT_BP:
3427 case X86_XCPT_OF:
3428 AssertMsgFailed(("Invalid TRPM vector %d for event type %d\n", uVector, enmTrpmEvent));
3429 RT_FALL_THRU();
3430
3431 case X86_XCPT_PF:
3432 case X86_XCPT_DF:
3433 case X86_XCPT_TS:
3434 case X86_XCPT_NP:
3435 case X86_XCPT_SS:
3436 case X86_XCPT_GP:
3437 case X86_XCPT_AC:
3438 {
3439 Event.n.u1ErrorCodeValid = 1;
3440 Event.n.u32ErrorCode = uErrCode;
3441 break;
3442 }
3443 }
3444 }
3445 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
3446 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3447 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
3448 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
3449 else
3450 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
3451
3452 rc = TRPMResetTrap(pVCpu);
3453 AssertRC(rc);
3454
3455 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
3456 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
3457
3458 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
3459}
3460
3461
3462/**
3463 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
3464 * AMD-V to execute any instruction.
3465 *
3466 * @param pVCpu The cross context virtual CPU structure.
3467 */
3468static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu)
3469{
3470 Assert(pVCpu->hm.s.Event.fPending);
3471 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
3472
3473 SVMEVENT Event;
3474 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3475
3476 uint8_t uVector = Event.n.u8Vector;
3477 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event, uVector);
3478
3479 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, Event.n.u3Type));
3480
3481 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
3482 AssertRC(rc);
3483
3484 if (Event.n.u1ErrorCodeValid)
3485 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
3486
3487 if ( enmTrapType == TRPM_TRAP
3488 && uVector == X86_XCPT_PF)
3489 {
3490 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
3491 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
3492 }
3493 else if (enmTrapType == TRPM_SOFTWARE_INT)
3494 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
3495 pVCpu->hm.s.Event.fPending = false;
3496}
3497
3498
3499/**
3500 * Sets the virtual interrupt intercept control in the VMCB.
3501 *
3502 * @param pVCpu The cross context virtual CPU structure.
3503 * @param pVmcb Pointer to the VM control block.
3504 */
3505static void hmR0SvmSetIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3506{
3507 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3508
3509 /*
3510 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
3511 * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC
3512 * (updating ISR at the right time) and inject the interrupt.
3513 *
3514 * When AVIC is supported, we could make use of asynchronous delivery without a
3515 * #VMEXIT, and we would be passing the AVIC page to SVM.
3516 *
3517 * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt
3518 * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
3519 */
3520 Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
3521 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
3522 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3523 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3524 Log4(("Set VINTR intercept\n"));
3525}
3526
3527
3528/**
3529 * Clears the virtual interrupt intercept control in the VMCB as
3530 * we have figured the guest is unable to process any interrupts
3531 * at this point in time.
3532 *
3533 * @param pVCpu The cross context virtual CPU structure.
3534 * @param pVmcb Pointer to the VM control block.
3535 */
3536static void hmR0SvmClearIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3537{
3538 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3539
3540 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
3541 if ( pVmcbCtrl->IntCtrl.n.u1VIrqPending
3542 || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
3543 {
3544 pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
3545 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3546 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3547 Log4(("Cleared VINTR intercept\n"));
3548 }
3549}
3550
3551
3552/**
3553 * Evaluates the event to be delivered to the guest and sets it as the pending
3554 * event.
3555 *
3556 * @returns Strict VBox status code.
3557 * @param pVCpu The cross context virtual CPU structure.
3558 * @param pSvmTransient Pointer to the SVM transient structure.
3559 */
3560static VBOXSTRICTRC hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
3561{
3562 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3563 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
3564 | CPUMCTX_EXTRN_RFLAGS
3565 | CPUMCTX_EXTRN_INHIBIT_INT
3566 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
3567
3568 Assert(!pVCpu->hm.s.Event.fPending);
3569 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3570 Assert(pVmcb);
3571
3572 bool const fGif = CPUMGetGuestGif(pCtx);
3573 bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(pCtx);
3574 bool const fBlockNmi = CPUMAreInterruptsInhibitedByNmi(pCtx);
3575
3576 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
3577 fGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
3578 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
3579
3580 /** @todo SMI. SMIs take priority over NMIs. */
3581
3582 /*
3583 * Check if the guest or nested-guest can receive NMIs.
3584 * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
3585 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
3586 */
3587 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
3588 && !fBlockNmi)
3589 {
3590 if ( fGif
3591 && !fIntShadow)
3592 {
3593#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3594 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
3595 {
3596 Log4(("Intercepting NMI -> #VMEXIT\n"));
3597 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3598 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
3599 }
3600#endif
3601 Log4(("Setting NMI pending for injection\n"));
3602 SVMEVENT Event;
3603 Event.u = 0;
3604 Event.n.u1Valid = 1;
3605 Event.n.u8Vector = X86_XCPT_NMI;
3606 Event.n.u3Type = SVM_EVENT_NMI;
3607 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3608 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3609 }
3610 else if (!fGif)
3611 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3612 else if (!pSvmTransient->fIsNestedGuest)
3613 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3614 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3615 }
3616 /*
3617 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
3618 * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
3619 * it from the APIC device.
3620 *
3621 * For nested-guests, physical interrupts always take priority over virtual interrupts.
3622 * We don't need to inject nested-guest virtual interrupts here, we can let the hardware
3623 * do that work when we execute nested-guest code esp. since all the required information
3624 * is in the VMCB, unlike physical interrupts where we need to fetch the interrupt from
3625 * the virtual interrupt controller.
3626 *
3627 * See AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
3628 */
3629 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3630 && !pVCpu->hm.s.fSingleInstruction)
3631 {
3632 bool const fBlockInt = !pSvmTransient->fIsNestedGuest ? !(pCtx->eflags.u & X86_EFL_IF)
3633 : CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx);
3634 if ( fGif
3635 && !fBlockInt
3636 && !fIntShadow)
3637 {
3638#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3639 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
3640 {
3641 Log4(("Intercepting INTR -> #VMEXIT\n"));
3642 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3643 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
3644 }
3645#endif
3646 uint8_t u8Interrupt;
3647 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3648 if (RT_SUCCESS(rc))
3649 {
3650 Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
3651 SVMEVENT Event;
3652 Event.u = 0;
3653 Event.n.u1Valid = 1;
3654 Event.n.u8Vector = u8Interrupt;
3655 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3656 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3657 }
3658 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3659 {
3660 /*
3661 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3662 * updated eventually when the TPR is written by the guest.
3663 */
3664 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3665 }
3666 else
3667 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3668 }
3669 else if (!fGif)
3670 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3671 else if (!pSvmTransient->fIsNestedGuest)
3672 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3673 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3674 }
3675
3676 return VINF_SUCCESS;
3677}
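
/*
 * Note: summary of the evaluation order above (highest priority first), assuming GIF is set
 * and no interrupt shadow is active: NMIs (VMCPU_FF_INTERRUPT_NMI) are considered before
 * external interrupts (VMCPU_FF_INTERRUPT_APIC, VMCPU_FF_INTERRUPT_PIC); SMIs are not handled
 * here (see the @todo above). When delivery is not currently possible, the code instead arms
 * the STGI intercept (GIF clear) or an interrupt window (VINTR) as appropriate.
 */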
3678
3679
3680/**
3681 * Injects any pending events into the guest (or nested-guest).
3682 *
3683 * @param pVCpu The cross context virtual CPU structure.
3684 * @param pVmcb Pointer to the VM control block.
3685 *
3686 * @remarks Must only be called when we are guaranteed to enter
3687 * hardware-assisted SVM execution and not return to ring-3
3688 * prematurely.
3689 */
3690static void hmR0SvmInjectPendingEvent(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3691{
3692 Assert(!TRPMHasTrap(pVCpu));
3693 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3694
3695 bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx);
3696#ifdef VBOX_STRICT
3697 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3698 bool const fGif = CPUMGetGuestGif(pCtx);
3699 bool fAllowInt = fGif;
3700 if (fGif)
3701 {
3702 /*
3703 * For nested-guests we have no way to determine if we're injecting a physical or
3704 * virtual interrupt at this point. Hence the partial verification below.
3705 */
3706 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3707 fAllowInt = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx) || CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
3708 else
3709 fAllowInt = RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
3710 }
3711#endif
3712
3713 if (pVCpu->hm.s.Event.fPending)
3714 {
3715 SVMEVENT Event;
3716 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3717 Assert(Event.n.u1Valid);
3718
3719 /*
3720 * Validate event injection pre-conditions.
3721 */
3722 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3723 {
3724 Assert(fAllowInt);
3725 Assert(!fIntShadow);
3726 }
3727 else if (Event.n.u3Type == SVM_EVENT_NMI)
3728 {
3729 Assert(fGif);
3730 Assert(!fIntShadow);
3731 }
3732
3733 /*
3734 * Before injecting an NMI we must mark interrupts as inhibited by NMI (see the
3735 * CPUMSetInterruptInhibitingByNmi call below) to prevent nested NMIs. We do this only
3736 * when we are surely going to inject the NMI, as otherwise, if we return to ring-3
3737 * prematurely, we could leave NMIs blocked indefinitely upon re-entry into SVM R0.
3738 *
3739 * With VT-x, this is handled by the guest interruptibility-state VMCS field: the CPU
3740 * updates that field after actually delivering the NMI, and we read it on VM-exit to
3741 * determine the state.
3742 */
3743 if ( Event.n.u3Type == SVM_EVENT_NMI
3744 && Event.n.u8Vector == X86_XCPT_NMI)
3745 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3746
3747 /*
3748 * Inject it (update VMCB for injection by the hardware).
3749 */
3750 Log4(("Injecting pending HM event\n"));
3751 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
3752 pVCpu->hm.s.Event.fPending = false;
3753
3754 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3755 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
3756 else
3757 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
3758 }
3759 else
3760 Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
3761
3762 /*
3763 * We could have injected an NMI through IEM and continue guest execution using
3764 * hardware-assisted SVM. In which case, we would not have any events pending (above)
3765 * but we still need to intercept IRET in order to eventually clear NMI inhibition.
3766 */
3767 if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
3768 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
3769
3770 /*
3771 * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
3772 *
3773 * For nested-guests: We need to update it too for the scenario where IEM executes
3774 * the nested-guest but execution later continues here with an interrupt shadow active.
3775 */
3776 pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
3777}
3778
3779
3780/**
3781 * Reports world-switch error and dumps some useful debug info.
3782 *
3783 * @param pVCpu The cross context virtual CPU structure.
3784 * @param rcVMRun The return code from VMRUN (or
3785 * VERR_SVM_INVALID_GUEST_STATE for invalid
3786 * guest-state).
3787 */
3788static void hmR0SvmReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun)
3789{
3790 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3791 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
3792 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3793
3794 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
3795 {
3796#ifdef VBOX_STRICT
3797 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
3798 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3799 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
3800 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
3801 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
3802 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
3803 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
3804 Log4(("ctrl.u32InterceptXcpt %#x\n", pVmcb->ctrl.u32InterceptXcpt));
3805 Log4(("ctrl.u64InterceptCtrl %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
3806 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
3807 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
3808 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
3809
3810 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
3811 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
3812 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
3813
3814 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
3815 Log4(("ctrl.IntCtrl.u1VIrqPending %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
3816 Log4(("ctrl.IntCtrl.u1VGif %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGif));
3817 Log4(("ctrl.IntCtrl.u6Reserved0 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
3818 Log4(("ctrl.IntCtrl.u4VIntrPrio %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
3819 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
3820 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
3821 Log4(("ctrl.IntCtrl.u1VIntrMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
3822 Log4(("ctrl.IntCtrl.u1VGifEnable %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
3823 Log4(("ctrl.IntCtrl.u5Reserved1 %#x\n", pVmcb->ctrl.IntCtrl.n.u5Reserved));
3824 Log4(("ctrl.IntCtrl.u8VIntrVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
3825 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
3826
3827 Log4(("ctrl.IntShadow.u1IntShadow %#x\n", pVmcb->ctrl.IntShadow.n.u1IntShadow));
3828 Log4(("ctrl.IntShadow.u1GuestIntMask %#x\n", pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
3829 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
3830 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
3831 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
3832 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
3833 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
3834 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
3835 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
3836 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
3837 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3838 Log4(("ctrl.NestedPagingCtrl.u1NestedPaging %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging));
3839 Log4(("ctrl.NestedPagingCtrl.u1Sev %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1Sev));
3840 Log4(("ctrl.NestedPagingCtrl.u1SevEs %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1SevEs));
3841 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
3842 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
3843 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
3844 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
3845 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
3846 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
3847
3848 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
3849
3850 Log4(("ctrl.LbrVirt.u1LbrVirt %#x\n", pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
3851 Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload %#x\n", pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));
3852
3853 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
3854 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
3855 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
3856 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
3857 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
3858 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
3859 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
3860 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
3861 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
3862 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
3863 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
3864 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
3865 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
3866 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
3867 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
3868 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
3869 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
3870 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
3871 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
3872 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
3873
3874 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
3875 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
3876
3877 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
3878 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
3879 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
3880 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
3881
3882 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
3883 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
3884
3885 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
3886 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
3887 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
3888 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
3889
3890 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
3891 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
3892 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
3893 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
3894 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
3895 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
3896 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
3897
3898 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
3899 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
3900 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
3901 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
3902
3903 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
3904 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
3905 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
3906
3907 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
3908 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
3909 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
3910 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
3911 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
3912 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
3913 Log4(("guest.u64PAT %#RX64\n", pVmcb->guest.u64PAT));
3914 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
3915 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
3916 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
3917 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
3918 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
3919
3920 NOREF(pVmcb);
3921#endif /* VBOX_STRICT */
3922 }
3923 else
3924 Log4Func(("rcVMRun=%d\n", rcVMRun));
3925}
3926
3927
3928/**
3929 * Check per-VM and per-VCPU force flag actions that require us to go back to
3930 * ring-3 for one reason or another.
3931 *
3932 * @returns Strict VBox status code (information status code included).
3933 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3934 * ring-3.
3935 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3936 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3937 * interrupts)
3938 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3939 * all EMTs to be in ring-3.
3940 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3941 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3942 * to the EM loop.
3943 *
3944 * @param pVCpu The cross context virtual CPU structure.
3945 */
3946static VBOXSTRICTRC hmR0SvmCheckForceFlags(PVMCPUCC pVCpu)
3947{
3948 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3949
3950 /* Could happen as a result of longjump. */
3951 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
3952 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3953
3954 /* Update pending interrupts into the APIC's IRR. */
3955 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3956 APICUpdatePendingInterrupts(pVCpu);
3957
3958 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3959 if ( VM_FF_IS_ANY_SET(pVM, !pVCpu->hm.s.fSingleInstruction
3960 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3961 || VMCPU_FF_IS_ANY_SET(pVCpu, !pVCpu->hm.s.fSingleInstruction
3962 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3963 {
3964 /* Pending PGM CR3 sync. */
3965 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3966 {
3967 int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
3968 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3969 if (rc != VINF_SUCCESS)
3970 {
3971 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
3972 return rc;
3973 }
3974 }
3975
3976 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3977 /* -XXX- what was that about single stepping? */
3978 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3979 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3980 {
3981 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3982 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3983 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3984 return rc;
3985 }
3986
3987 /* Pending VM request packets, such as hardware interrupts. */
3988 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3989 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3990 {
3991 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
3992 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3993 return VINF_EM_PENDING_REQUEST;
3994 }
3995
3996 /* Pending PGM pool flushes. */
3997 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3998 {
3999 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
4000 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4001 return VINF_PGM_POOL_FLUSH_PENDING;
4002 }
4003
4004 /* Pending DMA requests. */
4005 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4006 {
4007 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
4008 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4009 return VINF_EM_RAW_TO_R3;
4010 }
4011 }
4012
4013 return VINF_SUCCESS;
4014}
4015
4016
4017/**
4018 * Does the preparations before executing guest code in AMD-V.
4019 *
4020 * This may cause longjmps to ring-3 and may even result in rescheduling to the
4021 * recompiler. We must be cautious about what we do here regarding committing
4022 * guest-state information into the VMCB, assuming we will assuredly execute the
4023 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
4024 * clearing the common-state (TRPM/forceflags), we must undo those changes so
4025 * that the recompiler can (and should) use them when it resumes guest
4026 * execution. Otherwise such operations must be done when we can no longer
4027 * exit to ring-3.
4028 *
4029 * @returns Strict VBox status code (informational status codes included).
4030 * @retval VINF_SUCCESS if we can proceed with running the guest.
4031 * @retval VINF_* scheduling changes, we have to go back to ring-3.
4032 *
4033 * @param pVCpu The cross context virtual CPU structure.
4034 * @param pSvmTransient Pointer to the SVM transient structure.
4035 */
4036static VBOXSTRICTRC hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4037{
4038 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4039
4040#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
4041 if (pSvmTransient->fIsNestedGuest)
4042 {
4043 Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
4044 return VINF_EM_RESCHEDULE_REM;
4045 }
4046#endif
4047
4048 /* Check force flag actions that might require us to go back to ring-3. */
4049 VBOXSTRICTRC rc = hmR0SvmCheckForceFlags(pVCpu);
4050 if (rc != VINF_SUCCESS)
4051 return rc;
4052
4053 if (TRPMHasTrap(pVCpu))
4054 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
4055 else if (!pVCpu->hm.s.Event.fPending)
4056 {
4057 rc = hmR0SvmEvaluatePendingEvent(pVCpu, pSvmTransient);
4058 if ( rc != VINF_SUCCESS
4059 || pSvmTransient->fIsNestedGuest != CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4060 {
4061 /* If a nested-guest VM-exit occurred, bail. */
4062 if (pSvmTransient->fIsNestedGuest)
4063 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4064 return rc;
4065 }
4066 }
4067
4068 /*
4069 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
4070 * Just do it in software, see @bugref{8411}.
4071 * NB: If we could continue a task switch exit we wouldn't need to do this.
4072 */
4073 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4074 if (RT_UNLIKELY( !g_fHmSvmFeatures
4075 && pVCpu->hm.s.Event.fPending
4076 && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
4077 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4078
4079#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4080 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4081 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
4082#endif
4083
4084#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4085 /*
4086 * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
4087 */
4088 if (pSvmTransient->fIsNestedGuest)
4089 hmR0SvmSetupVmcbNested(pVCpu);
4090#endif
4091
4092 /*
4093 * Export the guest state bits that are not shared with the host in any way as we can
4094 * longjmp or get preempted in the midst of exporting some of the state.
4095 */
4096 rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
4097 AssertRCReturn(rc, rc);
4098 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
4099
4100 /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
4101 Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
4102
4103 /*
4104 * If we're not intercepting TPR changes in the guest, save the guest TPR before the
4105 * world-switch so we can update it on the way back if the guest changed the TPR.
4106 */
4107 if (pVCpu->hmr0.s.svm.fSyncVTpr)
4108 {
4109 Assert(!pSvmTransient->fIsNestedGuest);
4110 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4111 if (pVM->hm.s.fTprPatchingActive)
4112 pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
4113 else
4114 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
4115 }
4116
4117 /*
4118 * No longjmps to ring-3 from this point on!!!
4119 *
4120 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4121 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4122 */
4123 VMMRZCallRing3Disable(pVCpu);
4124
4125 /*
4126 * We disable interrupts so that we don't miss any interrupts that would flag preemption
4127 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
4128 * preemption disabled for a while. Since this is purely to aid the
4129 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily re-enable and
4130 * disable interrupts on NT.
4131 *
4132 * We need to check for force-flags that could've possibly been altered since we last
4133 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
4134 * see @bugref{6398}).
4135 *
4136 * We also check a couple of other force-flags as a last opportunity to get the EMT back
4137 * to ring-3 before executing guest code.
4138 */
4139 pSvmTransient->fEFlags = ASMIntDisableFlags();
4140 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4141 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4142 {
4143 ASMSetFlags(pSvmTransient->fEFlags);
4144 VMMRZCallRing3Enable(pVCpu);
4145 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4146 return VINF_EM_RAW_TO_R3;
4147 }
4148 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4149 {
4150 ASMSetFlags(pSvmTransient->fEFlags);
4151 VMMRZCallRing3Enable(pVCpu);
4152 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
4153 return VINF_EM_RAW_INTERRUPT;
4154 }
4155
4156 return VINF_SUCCESS;
4157}
4158
4159
4160/**
4161 * Prepares to run guest (or nested-guest) code in AMD-V and we've committed to
4162 * doing so.
4163 *
4164 * This means there is no backing out to ring-3 or anywhere else at this point.
4165 *
4166 * @param pVCpu The cross context virtual CPU structure.
4167 * @param pSvmTransient Pointer to the SVM transient structure.
4168 *
4169 * @remarks Called with preemption disabled.
4170 * @remarks No-long-jump zone!!!
4171 */
4172static void hmR0SvmPreRunGuestCommitted(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4173{
4174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4175 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4176
4177 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4178 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4179
4180 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4181 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4182
4183 hmR0SvmInjectPendingEvent(pVCpu, pVmcb);
4184
4185 if (!CPUMIsGuestFPUStateActive(pVCpu))
4186 {
4187 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4188 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4189 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4190 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
4191 }
4192
4193 /* Load the state shared between host and guest (FPU, debug). */
4194 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)
4195 hmR0SvmExportSharedState(pVCpu, pVmcb);
4196
4197 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */
4198 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
4199
4200 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
4201 RTCPUID const idHostCpu = pHostCpu->idCpu;
4202 bool const fMigratedHostCpu = idHostCpu != pVCpu->hmr0.s.idLastCpu;
4203
4204 /* Setup TSC offsetting. */
4205 if ( pSvmTransient->fUpdateTscOffsetting
4206 || fMigratedHostCpu)
4207 {
4208 hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
4209 pSvmTransient->fUpdateTscOffsetting = false;
4210 }
4211
4212 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
4213 if (!(pVmcb->ctrl.u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
4214 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4215 else
4216 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4217
4218 /* If we've migrated CPUs, mark the VMCB Clean bits as dirty. */
4219 if (fMigratedHostCpu)
4220 pVmcb->ctrl.u32VmcbCleanBits = 0;
4221
4222 /* Store status of the shared guest-host state at the time of VMRUN. */
4223 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4224 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4225
4226#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4227 uint8_t *pbMsrBitmap;
4228 if (!pSvmTransient->fIsNestedGuest)
4229 pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
4230 else
4231 {
4232 /** @todo We could perhaps optimize this by monitoring if the guest modifies its
4233 * MSRPM and only perform this if it changed also use EVEX.POR when it
4234 * does. */
4235 hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);
4236
4237 /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
4238 pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
4239 pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
4240 }
4241#else
4242 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
4243#endif
4244
4245 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4246 /* Flush the appropriate tagged-TLB entries. */
4247 hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
4248 Assert(pVCpu->hmr0.s.idLastCpu == idHostCpu);
4249
4250 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4251
4252 TMNotifyStartOfExecution(pVM, pVCpu); /* Finally, notify TM to resume its clocks as we're about
4253 to start executing. */
4254
4255 /*
4256 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs
4257 * (that don't cause exits) read the guest MSR, see @bugref{3324}.
4258 *
4259 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4260 */
4261 if ( g_CpumHostFeatures.s.fRdTscP
4262 && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4263 {
4264 uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
4265 pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4266 if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
4267 ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
4268 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4269 pSvmTransient->fRestoreTscAuxMsr = true;
4270 }
4271 else
4272 {
4273 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4274 pSvmTransient->fRestoreTscAuxMsr = false;
4275 }
4276 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
4277
4278 /*
4279 * If VMCB Clean bits aren't supported by the CPU or exposed to the guest in the nested
4280 * virtualization case, mark all state-bits as dirty indicating to the CPU to re-load
4281 * from the VMCB.
4282 */
4283 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pSvmTransient->fIsNestedGuest);
4284 if (!fSupportsVmcbCleanBits)
4285 pVmcb->ctrl.u32VmcbCleanBits = 0;
4286}
4287
4288
4289/**
4290 * Wrapper for running the guest (or nested-guest) code in AMD-V.
4291 *
4292 * @returns VBox strict status code.
4293 * @param pVCpu The cross context virtual CPU structure.
4294 * @param HCPhysVmcb The host physical address of the VMCB.
4295 *
4296 * @remarks No-long-jump zone!!!
4297 */
4298DECLINLINE(int) hmR0SvmRunGuest(PVMCPUCC pVCpu, RTHCPHYS HCPhysVmcb)
4299{
4300 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
4301 pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
4302 return pVCpu->hmr0.s.svm.pfnVMRun(pVCpu->CTX_SUFF(pVM), pVCpu, HCPhysVmcb);
4303}
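
/*
 * Note: since fExtrn is marked "everything external" above, any guest register a caller
 * needs after VMRUN must first be fetched from the VMCB via hmR0SvmImportGuestState().
 */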
4304
4305
4306/**
4307 * Performs some essential restoration of state after running guest (or
4308 * nested-guest) code in AMD-V.
4309 *
4310 * @param pVCpu The cross context virtual CPU structure.
4311 * @param pSvmTransient Pointer to the SVM transient structure.
4312 * @param rcVMRun Return code of VMRUN.
4313 *
4314 * @remarks Called with interrupts disabled.
4315 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4316 * unconditionally when it is safe to do so.
4317 */
4318static void hmR0SvmPostRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, VBOXSTRICTRC rcVMRun)
4319{
4320 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4321
4322 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4323 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4324
4325 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4326 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
4327
4328 /* TSC read must be done early for maximum accuracy. */
4329 if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4330 {
4331 if (!pSvmTransient->fIsNestedGuest)
4332 TMCpuTickSetLastSeen(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4333#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4334 else
4335 {
4336 /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
4337 uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4338 TMCpuTickSetLastSeen(pVCpu, uGstTsc);
4339 }
4340#endif
4341 }
4342
4343 if (pSvmTransient->fRestoreTscAuxMsr)
4344 {
4345 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4346 CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4347 if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
4348 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
4349 }
4350
4351 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
4352 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4353 TMNotifyEndOfExecution(pVM, pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
4354 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4355
4356 Assert(!(ASMGetFlags() & X86_EFL_IF));
4357 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4358 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4359
4360 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4361 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4362 {
4363 Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", VBOXSTRICTRC_VAL(rcVMRun)));
4364 return;
4365 }
4366
4367 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
4368 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4369 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4370 pVmcbCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
4371
4372#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4373 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4374 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4375#else
4376 /*
4377 * Always import the following:
4378 *
4379 * - RIP for exit optimizations and evaluating event injection on re-entry.
4380 * - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug
4381 * state on preemption.
4382 * - Interrupt shadow, GIF for evaluating event injection on VM re-entry.
4383 * - CS for exit optimizations.
4384 * - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the
4385 * assembly switcher code.
4386 * - Shared state (only DR7 currently) for exporting shared debug state on preemption.
4387 */
4388 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
4389 | CPUMCTX_EXTRN_RFLAGS
4390 | CPUMCTX_EXTRN_RAX
4391 | CPUMCTX_EXTRN_RSP
4392 | CPUMCTX_EXTRN_CS
4393 | CPUMCTX_EXTRN_HWVIRT
4394 | CPUMCTX_EXTRN_INHIBIT_INT
4395 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
4396 | HMSVM_CPUMCTX_SHARED_STATE);
4397#endif
4398
4399 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
4400 && pVCpu->hmr0.s.svm.fSyncVTpr)
4401 {
4402 Assert(!pSvmTransient->fIsNestedGuest);
4403 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
4404 if ( pVM->hm.s.fTprPatchingActive
4405 && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
4406 {
4407 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
4408 AssertRC(rc);
4409 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4410 }
4411 /* Sync TPR when we aren't intercepting CR8 writes. */
4412 else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
4413 {
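            /* V_TPR holds the TPR priority class (CR8, i.e. APIC TPR bits 7:4), hence the
               shift by 4 when handing the value back to the APIC. */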
4414 int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
4415 AssertRC(rc);
4416 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4417 }
4418 }
4419
4420#ifdef DEBUG_ramshankar
4421 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4422 {
4423 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4424 hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
4425 0 /* uVerbose */);
4426 }
4427#endif
4428
4429 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
4430 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
4431 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, pVCpu->hmr0.s.uTscExit);
4432}
4433
4434
4435/**
4436 * Runs the guest code using AMD-V.
4437 *
4438 * @returns Strict VBox status code.
4439 * @param pVCpu The cross context virtual CPU structure.
4440 * @param pcLoops Pointer to the number of executed loops.
4441 */
4442static VBOXSTRICTRC hmR0SvmRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
4443{
4444 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
4445 Assert(pcLoops);
4446 Assert(*pcLoops <= cMaxResumeLoops);
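    /* cMaxResumeLoops caps how many #VMEXITs we handle in this inner loop before forcing a return
       to ring-3 (VINF_EM_RAW_INTERRUPT below), so that pending ring-3 work gets a chance to run. */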
4447
4448 SVMTRANSIENT SvmTransient;
4449 RT_ZERO(SvmTransient);
4450 SvmTransient.fUpdateTscOffsetting = true;
4451 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4452
4453 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
4454 for (;;)
4455 {
4456 Assert(!HMR0SuspendPending());
4457 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4458
4459         /* Preparatory work for running guest code; this may force us to return to
4460            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
4461 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4462 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4463 if (rc != VINF_SUCCESS)
4464 break;
4465
4466 /*
4467 * No longjmps to ring-3 from this point on!!!
4468 *
4469 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4470 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4471 */
4472 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4473 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
4474
4475 /* Restore any residual host-state and save any bits shared between host and guest
4476 into the guest-CPU state. Re-enables interrupts! */
4477 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4478
4479 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4480 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4481 {
4482 if (rc == VINF_SUCCESS)
4483 rc = VERR_SVM_INVALID_GUEST_STATE;
4484 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
4485 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
4486 break;
4487 }
4488
4489 /* Handle the #VMEXIT. */
4490 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4491 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4492 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
4493 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
4494 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4495 if (rc != VINF_SUCCESS)
4496 break;
4497 if (++(*pcLoops) >= cMaxResumeLoops)
4498 {
4499 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4500 rc = VINF_EM_RAW_INTERRUPT;
4501 break;
4502 }
4503 }
4504
4505 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4506 return rc;
4507}
4508
4509
4510#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4511/**
4512 * Runs the nested-guest code using AMD-V.
4513 *
4514 * @returns Strict VBox status code.
4515 * @param pVCpu The cross context virtual CPU structure.
4516 * @param pcLoops Pointer to the number of executed loops. If we're switching
4517 * from the guest-code execution loop to this nested-guest
4518 * execution loop pass the remainder value, else pass 0.
4519 */
4520static VBOXSTRICTRC hmR0SvmRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
4521{
4522 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4523 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4524 Assert(pcLoops);
4525 Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops);
4526 /** @todo r=bird: Sharing this with ring-3 isn't safe in the long run, I fear... */
4527 RTHCPHYS const HCPhysVmcb = GVMMR0ConvertGVMPtr2HCPhys(pVCpu->pGVM, &pCtx->hwvirt.svm.Vmcb);
4528
4529 SVMTRANSIENT SvmTransient;
4530 RT_ZERO(SvmTransient);
4531 SvmTransient.fUpdateTscOffsetting = true;
4532 SvmTransient.pVmcb = &pCtx->hwvirt.svm.Vmcb;
4533 SvmTransient.fIsNestedGuest = true;
4534
4535     /* Set up the pointer so PGM/IEM can query #VMEXIT auxiliary info on demand in ring-0. */
4536 pVCpu->hmr0.s.svm.pSvmTransient = &SvmTransient;
4537
4538 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_4;
4539 for (;;)
4540 {
4541 Assert(!HMR0SuspendPending());
4542 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4543
4544         /* Preparatory work for running nested-guest code; this may force us to return to ring-3
4545            or cause a nested #VMEXIT (hence the nested-guest mode re-check below).  This bugger disables interrupts on VINF_SUCCESS! */
4546 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4547 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4548 if ( rc != VINF_SUCCESS
4549 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4550 break;
4551
4552 /*
4553 * No longjmps to ring-3 from this point on!!!
4554 *
4555 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4556 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4557 */
4558 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4559
4560 rc = hmR0SvmRunGuest(pVCpu, HCPhysVmcb);
4561
4562 /* Restore any residual host-state and save any bits shared between host and guest
4563 into the guest-CPU state. Re-enables interrupts! */
4564 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4565
4566 if (RT_LIKELY( rc == VINF_SUCCESS
4567 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
4568 { /* extremely likely */ }
4569 else
4570 {
4571 /* VMRUN failed, shouldn't really happen, Guru. */
4572 if (rc != VINF_SUCCESS)
4573 break;
4574
4575 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
4576 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4577 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
4578 rc = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0);
4579 break;
4580 }
4581
4582 /* Handle the #VMEXIT. */
4583 HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4584 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4585 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, &pCtx->hwvirt.svm.Vmcb);
4586 rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient);
4587 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4588 if (rc == VINF_SUCCESS)
4589 {
4590 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4591 {
4592 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4593 rc = VINF_SVM_VMEXIT;
4594 }
4595 else
4596 {
4597 if (++(*pcLoops) <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops)
4598 continue;
4599 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4600 rc = VINF_EM_RAW_INTERRUPT;
4601 }
4602 }
4603 else
4604 Assert(rc != VINF_SVM_VMEXIT);
4605 break;
4606 /** @todo NSTSVM: handle single-stepping. */
4607 }
4608
4609     /* Ensure the #VMEXIT auxiliary info is no longer available. */
4610 pVCpu->hmr0.s.svm.pSvmTransient = NULL;
4611
4612 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4613 return rc;
4614}
4615#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
4616
4617
4618/**
4619 * Checks if any expensive dtrace probes are enabled and we should go to the
4620 * debug loop.
4621 *
4622 * @returns true if we should use debug loop, false if not.
4623 */
4624static bool hmR0SvmAnyExpensiveProbesEnabled(void)
4625{
4626 /* It's probably faster to OR the raw 32-bit counter variables together.
4627 Since the variables are in an array and the probes are next to one
4628        another (more or less), we have good locality.  So it's better to read
4629        eight or nine cache lines every time and have only one conditional than
4630 128+ conditionals, right? */
4631 return ( VBOXVMM_R0_HMSVM_VMEXIT_ENABLED_RAW() /* expensive too due to context */
4632 | VBOXVMM_XCPT_DE_ENABLED_RAW()
4633 | VBOXVMM_XCPT_DB_ENABLED_RAW()
4634 | VBOXVMM_XCPT_BP_ENABLED_RAW()
4635 | VBOXVMM_XCPT_OF_ENABLED_RAW()
4636 | VBOXVMM_XCPT_BR_ENABLED_RAW()
4637 | VBOXVMM_XCPT_UD_ENABLED_RAW()
4638 | VBOXVMM_XCPT_NM_ENABLED_RAW()
4639 | VBOXVMM_XCPT_DF_ENABLED_RAW()
4640 | VBOXVMM_XCPT_TS_ENABLED_RAW()
4641 | VBOXVMM_XCPT_NP_ENABLED_RAW()
4642 | VBOXVMM_XCPT_SS_ENABLED_RAW()
4643 | VBOXVMM_XCPT_GP_ENABLED_RAW()
4644 | VBOXVMM_XCPT_PF_ENABLED_RAW()
4645 | VBOXVMM_XCPT_MF_ENABLED_RAW()
4646 | VBOXVMM_XCPT_AC_ENABLED_RAW()
4647 | VBOXVMM_XCPT_XF_ENABLED_RAW()
4648 | VBOXVMM_XCPT_VE_ENABLED_RAW()
4649 | VBOXVMM_XCPT_SX_ENABLED_RAW()
4650 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
4651 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
4652 ) != 0
4653 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
4654 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
4655 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
4656 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
4657 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
4658 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
4659 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
4660 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
4661 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
4662 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
4663 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
4664 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
4665 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
4666 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
4667 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
4668 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
4669 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
4670 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
4671 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
4672 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
4673 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
4674 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
4675 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
4676 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
4677 | VBOXVMM_INSTR_STR_ENABLED_RAW()
4678 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
4679 //| VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
4680 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
4681 //| VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
4682 //| VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
4683 //| VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
4684 //| VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
4685 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
4686 | VBOXVMM_INSTR_SVM_VMRUN_ENABLED_RAW()
4687 | VBOXVMM_INSTR_SVM_VMLOAD_ENABLED_RAW()
4688 | VBOXVMM_INSTR_SVM_VMSAVE_ENABLED_RAW()
4689 | VBOXVMM_INSTR_SVM_STGI_ENABLED_RAW()
4690 | VBOXVMM_INSTR_SVM_CLGI_ENABLED_RAW()
4691 ) != 0
4692 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
4693 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
4694 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
4695 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
4696 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
4697 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
4698 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
4699 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
4700 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
4701 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
4702 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
4703 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
4704 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
4705 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
4706 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
4707 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
4708 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
4709 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
4710 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
4711 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
4712 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
4713 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
4714 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
4715 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
4716 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
4717 | VBOXVMM_EXIT_STR_ENABLED_RAW()
4718 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
4719 //| VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
4720 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
4721 //| VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
4722 //| VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
4723 //| VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
4724 //| VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
4725 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
4726 | VBOXVMM_EXIT_SVM_VMRUN_ENABLED_RAW()
4727 | VBOXVMM_EXIT_SVM_VMLOAD_ENABLED_RAW()
4728 | VBOXVMM_EXIT_SVM_VMSAVE_ENABLED_RAW()
4729 | VBOXVMM_EXIT_SVM_STGI_ENABLED_RAW()
4730 | VBOXVMM_EXIT_SVM_CLGI_ENABLED_RAW()
4731 ) != 0;
4732}
4733
4734
4735/**
4736 * Runs the guest code using AMD-V.
4737 *
4738 * @returns Strict VBox status code.
4739 * @param pVCpu The cross context virtual CPU structure.
4740 */
4741VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPUCC pVCpu)
4742{
4743 AssertPtr(pVCpu);
4744 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4745 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4746 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4747 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4748
4749 uint32_t cLoops = 0;
4750 VBOXSTRICTRC rc;
4751 for (;;)
4752 {
4753#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4754 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
4755#else
4756 NOREF(pCtx);
4757 bool const fInNestedGuestMode = false;
4758#endif
4759 if (!fInNestedGuestMode)
4760 {
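            /* Use the normal run loop unless the debug-loop flag, expensive dtrace probes, host
               single-stepping or armed INT3 breakpoints require the slower debug loop. */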
4761 if ( !pVCpu->hm.s.fUseDebugLoop
4762 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0SvmAnyExpensiveProbesEnabled())
4763 && !DBGFIsStepping(pVCpu)
4764 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
4765 rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
4766 else
4767 rc = hmR0SvmRunGuestCodeDebug(pVCpu, &cLoops);
4768 }
4769#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4770 else
4771 rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops);
4772
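        /* VINF_SVM_VMRUN: the guest executed VMRUN and we must continue in the nested-guest loop.
           VINF_SVM_VMEXIT: the nested-guest performed a #VMEXIT and we must continue in the regular guest loop. */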
4773 if (rc == VINF_SVM_VMRUN)
4774 {
4775 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4776 continue;
4777 }
4778 if (rc == VINF_SVM_VMEXIT)
4779 {
4780 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4781 continue;
4782 }
4783#endif
4784 break;
4785 }
4786
4787 /* Fixup error codes. */
4788 if (rc == VERR_EM_INTERPRETER)
4789 rc = VINF_EM_RAW_EMULATE_INSTR;
4790 else if (rc == VINF_EM_RESET)
4791 rc = VINF_EM_TRIPLE_FAULT;
4792
4793 /* Prepare to return to ring-3. This will remove longjmp notifications. */
4794 rc = hmR0SvmExitToRing3(pVCpu, rc);
4795 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4796 Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
4797 return rc;
4798}
4799
4800#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4801
4802/**
4803 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
4804 *
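 * @returns @c true if the nested-guest intercepts the I/O access, @c false otherwise.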
4805 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
4806 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO.
4807 */
4808static bool hmR0SvmIsIoInterceptSet(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
4809{
4810 const uint16_t u16Port = pIoExitInfo->n.u16Port;
4811 const SVMIOIOTYPE enmIoType = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
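    /* The operand-size and address-size fields are one-hot encoded (1/2/4 selects 8/16/32-bit operands
       and 16/32/64-bit addressing), so the mask below yields the operand size in bytes directly and
       shifting the address-size selector left by 4 turns 1/2/4 into 16/32/64 bits. */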
4812 const uint8_t cbReg = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
4813 const uint8_t cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
4814 const uint8_t iEffSeg = pIoExitInfo->n.u3Seg;
4815 const bool fRep = pIoExitInfo->n.u1Rep;
4816 const bool fStrIo = pIoExitInfo->n.u1Str;
4817
4818 return CPUMIsSvmIoInterceptSet(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
4819 NULL /* pIoExitInfo */);
4820}
4821
4822
4823/**
4824 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
4825 * SVM_EXIT_INVALID).
4826 *
4827 * @returns VBox status code (informational status codes included).
4828 * @param pVCpu The cross context virtual CPU structure.
4829 * @param pSvmTransient Pointer to the SVM transient structure.
4830 */
4831static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4832{
4833 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
4834 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
4835 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
4836
4837 /*
4838 * We import the complete state here because we use separate VMCBs for the guest and the
4839 * nested-guest, and the guest's VMCB is used after the #VMEXIT.  We could only get away with
4840 * saving/restoring just the #VMEXIT-specific state if we used the same VMCB for both the guest and the nested-guest.
4841 */
4842#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4843 do { \
4844 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
4845 return IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
4846 } while (0)
4847
4848 /*
4849 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the
4850 * nested-guest. If it isn't, it should be handled by the (outer) guest.
4851 */
4852 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
4853 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4854 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4855 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
4856 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1;
4857 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;
4858
4859 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
4860 switch (uExitCode)
4861 {
4862 case SVM_EXIT_CPUID:
4863 {
4864 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
4865 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4866 return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
4867 }
4868
4869 case SVM_EXIT_RDTSC:
4870 {
4871 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
4872 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4873 return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
4874 }
4875
4876 case SVM_EXIT_RDTSCP:
4877 {
4878 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
4879 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4880 return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
4881 }
4882
4883 case SVM_EXIT_MONITOR:
4884 {
4885 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
4886 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4887 return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
4888 }
4889
4890 case SVM_EXIT_MWAIT:
4891 {
4892 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
4893 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4894 return hmR0SvmExitMwait(pVCpu, pSvmTransient);
4895 }
4896
4897 case SVM_EXIT_HLT:
4898 {
4899 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
4900 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4901 return hmR0SvmExitHlt(pVCpu, pSvmTransient);
4902 }
4903
4904 case SVM_EXIT_MSR:
4905 {
4906 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
4907 {
4908 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
4909 uint16_t offMsrpm;
4910 uint8_t uMsrpmBit;
4911 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
4912 if (RT_SUCCESS(rc))
4913 {
4914 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
4915 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
4916
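                    /* Each MSR is covered by two adjacent bits in the MSRPM: the first bit intercepts
                       reads, the second intercepts writes. */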
4917 uint8_t const * const pbMsrBitmap = &pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm];
4918 bool const fInterceptRead = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
4919 bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
4920
4921 if ( (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4922 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
4923 {
4924 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4925 }
4926 }
4927 else
4928 {
4929 /*
4930 * MSRs not covered by the MSRPM automatically cause an #VMEXIT.
4931 * See AMD-V spec. "15.11 MSR Intercepts".
4932 */
4933 Assert(rc == VERR_OUT_OF_RANGE);
4934 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4935 }
4936 }
4937 return hmR0SvmExitMsr(pVCpu, pSvmTransient);
4938 }
4939
4940 case SVM_EXIT_IOIO:
4941 {
4942 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
4943 {
4944 SVMIOIOEXITINFO IoExitInfo;
4945 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
4946 bool const fIntercept = hmR0SvmIsIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, &IoExitInfo);
4947 if (fIntercept)
4948 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4949 }
4950 return hmR0SvmExitIOInstr(pVCpu, pSvmTransient);
4951 }
4952
4953 case SVM_EXIT_XCPT_PF:
4954 {
4955 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4956 if (pVM->hmr0.s.fNestedPaging)
4957 {
4958 uint32_t const u32ErrCode = pVmcbNstGstCtrl->u64ExitInfo1;
4959 uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
4960
4961 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
4962 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
4963 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
4964
4965 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
4966 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
4967 hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
4968 return VINF_SUCCESS;
4969 }
4970 return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
4971 }
4972
4973 case SVM_EXIT_XCPT_UD:
4974 {
4975 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
4976 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4977 hmR0SvmSetPendingXcptUD(pVCpu);
4978 return VINF_SUCCESS;
4979 }
4980
4981 case SVM_EXIT_XCPT_MF:
4982 {
4983 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
4984 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4985 return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
4986 }
4987
4988 case SVM_EXIT_XCPT_DB:
4989 {
4990 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
4991 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4992 return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
4993 }
4994
4995 case SVM_EXIT_XCPT_AC:
4996 {
4997 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
4998 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4999 return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
5000 }
5001
5002 case SVM_EXIT_XCPT_BP:
5003 {
5004 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
5005 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5006 return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
5007 }
5008
5009 case SVM_EXIT_READ_CR0:
5010 case SVM_EXIT_READ_CR3:
5011 case SVM_EXIT_READ_CR4:
5012 {
5013 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
5014 if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
5015 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5016 return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
5017 }
5018
5019 case SVM_EXIT_CR0_SEL_WRITE:
5020 {
5021 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5022 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5023 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5024 }
5025
5026 case SVM_EXIT_WRITE_CR0:
5027 case SVM_EXIT_WRITE_CR3:
5028 case SVM_EXIT_WRITE_CR4:
5029 case SVM_EXIT_WRITE_CR8: /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */
5030 {
5031 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5032 Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
5033
5034 if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
5035 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5036 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5037 }
5038
5039 case SVM_EXIT_PAUSE:
5040 {
5041 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
5042 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5043 return hmR0SvmExitPause(pVCpu, pSvmTransient);
5044 }
5045
5046 case SVM_EXIT_VINTR:
5047 {
5048 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
5049 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5050 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5051 }
5052
5053 case SVM_EXIT_INTR:
5054 case SVM_EXIT_NMI:
5055 case SVM_EXIT_SMI:
5056 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5057 {
5058 /*
5059             * We shouldn't direct physical interrupts, NMIs or SMIs to the nested-guest.
5060             *
5061             * Although we don't intercept SMIs, the nested-guest might.  Therefore, we might
5062             * get an SMI #VMEXIT here, so we simply ignore it rather than causing a corresponding
5063             * nested-guest #VMEXIT.
5064             *
5065             * We import the complete state here as we may cause #VMEXITs from ring-3
5066             * while trying to inject interrupts; see the comment at the top of this function.
5067 */
5068 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_ALL);
5069 return hmR0SvmExitIntr(pVCpu, pSvmTransient);
5070 }
5071
5072 case SVM_EXIT_FERR_FREEZE:
5073 {
5074 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
5075 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5076 return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
5077 }
5078
5079 case SVM_EXIT_INVLPG:
5080 {
5081 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
5082 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5083 return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
5084 }
5085
5086 case SVM_EXIT_WBINVD:
5087 {
5088 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
5089 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5090 return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
5091 }
5092
5093 case SVM_EXIT_INVD:
5094 {
5095 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
5096 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5097 return hmR0SvmExitInvd(pVCpu, pSvmTransient);
5098 }
5099
5100 case SVM_EXIT_RDPMC:
5101 {
5102 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
5103 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5104 return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
5105 }
5106
5107 default:
5108 {
5109 switch (uExitCode)
5110 {
5111 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5112 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5113 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5114 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5115 {
5116 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
5117 if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
5118 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5119 return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
5120 }
5121
5122 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5123 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5124 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5125 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5126 {
5127 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
5128 if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
5129 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5130 return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
5131 }
5132
5133 case SVM_EXIT_XCPT_DE:
5134 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5135 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5136 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5137 case SVM_EXIT_XCPT_OF:
5138 case SVM_EXIT_XCPT_BR:
5139 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5140 case SVM_EXIT_XCPT_NM:
5141 case SVM_EXIT_XCPT_DF:
5142 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5143 case SVM_EXIT_XCPT_TS:
5144 case SVM_EXIT_XCPT_NP:
5145 case SVM_EXIT_XCPT_SS:
5146 case SVM_EXIT_XCPT_GP:
5147 /* SVM_EXIT_XCPT_PF: */ /* Handled above. */
5148 case SVM_EXIT_XCPT_15: /* Reserved. */
5149 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5150 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5151 case SVM_EXIT_XCPT_MC:
5152 case SVM_EXIT_XCPT_XF:
5153 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5154 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5155 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5156 {
5157 uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
5158 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
5159 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5160 return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
5161 }
5162
5163 case SVM_EXIT_XSETBV:
5164 {
5165 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
5166 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5167 return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
5168 }
5169
5170 case SVM_EXIT_TASK_SWITCH:
5171 {
5172 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
5173 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5174 return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
5175 }
5176
5177 case SVM_EXIT_IRET:
5178 {
5179 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
5180 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5181 return hmR0SvmExitIret(pVCpu, pSvmTransient);
5182 }
5183
5184 case SVM_EXIT_SHUTDOWN:
5185 {
5186 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
5187 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5188 return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
5189 }
5190
5191 case SVM_EXIT_VMMCALL:
5192 {
5193 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
5194 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5195 return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
5196 }
5197
5198 case SVM_EXIT_CLGI:
5199 {
5200 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
5201 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5202 return hmR0SvmExitClgi(pVCpu, pSvmTransient);
5203 }
5204
5205 case SVM_EXIT_STGI:
5206 {
5207 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
5208 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5209 return hmR0SvmExitStgi(pVCpu, pSvmTransient);
5210 }
5211
5212 case SVM_EXIT_VMLOAD:
5213 {
5214 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
5215 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5216 return hmR0SvmExitVmload(pVCpu, pSvmTransient);
5217 }
5218
5219 case SVM_EXIT_VMSAVE:
5220 {
5221 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
5222 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5223 return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
5224 }
5225
5226 case SVM_EXIT_INVLPGA:
5227 {
5228 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
5229 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5230 return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
5231 }
5232
5233 case SVM_EXIT_VMRUN:
5234 {
5235 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
5236 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5237 return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
5238 }
5239
5240 case SVM_EXIT_RSM:
5241 {
5242 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
5243 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5244 hmR0SvmSetPendingXcptUD(pVCpu);
5245 return VINF_SUCCESS;
5246 }
5247
5248 case SVM_EXIT_SKINIT:
5249 {
5250 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
5251 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5252 hmR0SvmSetPendingXcptUD(pVCpu);
5253 return VINF_SUCCESS;
5254 }
5255
5256 case SVM_EXIT_NPF:
5257 {
5258 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
5259 return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
5260 }
5261
5262 case SVM_EXIT_INIT: /* We shouldn't get INIT signals while executing a nested-guest. */
5263 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5264
5265 default:
5266 {
5267 AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
5268 pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
5269 return VERR_SVM_UNKNOWN_EXIT;
5270 }
5271 }
5272 }
5273 }
5274 /* not reached */
5275
5276# undef NST_GST_VMEXIT_CALL_RET
5277}
5278
5279#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5280
5281/** @def VMEXIT_CALL_RET
5282 * Used by hmR0SvmHandleExit and hmR0SvmDebugHandleExit (in DEBUG_ramshankar builds it also imports and dirties the full guest state around the handler call).
5283 */
5284#ifdef DEBUG_ramshankar
5285# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
5286 do { \
5287 if ((a_fDbg) == 1) \
5288 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
5289 int rc = a_CallExpr; \
5290 if ((a_fDbg) == 1) \
5291 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
5292 return rc; \
5293 } while (0)
5294#else
5295# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
5296#endif
5297
5298/**
5299 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
5300 *
5301 * @returns Strict VBox status code (informational status codes included).
5302 * @param pVCpu The cross context virtual CPU structure.
5303 * @param pSvmTransient Pointer to the SVM transient structure.
5304 */
5305static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5306{
5307 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
5308 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
5309
5310 /*
5311 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
5312 * for most guests under normal workloads (for some definition of "normal").
5313 */
5314 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
5315 switch (uExitCode)
5316 {
5317 case SVM_EXIT_NPF: VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
5318 case SVM_EXIT_IOIO: VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
5319 case SVM_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
5320 case SVM_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
5321 case SVM_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
5322 case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
5323 case SVM_EXIT_MSR: VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
5324 case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
5325 case SVM_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
5326 case SVM_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));
5327
5328 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5329 case SVM_EXIT_INTR:
5330 case SVM_EXIT_NMI: VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));
5331
5332 case SVM_EXIT_READ_CR0:
5333 case SVM_EXIT_READ_CR3:
5334 case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));
5335
5336 case SVM_EXIT_CR0_SEL_WRITE:
5337 case SVM_EXIT_WRITE_CR0:
5338 case SVM_EXIT_WRITE_CR3:
5339 case SVM_EXIT_WRITE_CR4:
5340 case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));
5341
5342 case SVM_EXIT_VINTR: VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
5343 case SVM_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
5344 case SVM_EXIT_VMMCALL: VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
5345 case SVM_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
5346 case SVM_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
5347 case SVM_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
5348 case SVM_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
5349 case SVM_EXIT_IRET: VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
5350 case SVM_EXIT_XCPT_DE: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDE(pVCpu, pSvmTransient));
5351 case SVM_EXIT_XCPT_UD: VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
5352 case SVM_EXIT_XCPT_MF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
5353 case SVM_EXIT_XCPT_DB: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
5354 case SVM_EXIT_XCPT_AC: VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
5355 case SVM_EXIT_XCPT_BP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
5356 case SVM_EXIT_XCPT_GP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptGP(pVCpu, pSvmTransient));
5357 case SVM_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
5358 case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));
5359
5360 default:
5361 {
5362 switch (pSvmTransient->u64ExitCode)
5363 {
5364 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5365 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5366 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5367 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5368 VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));
5369
5370 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5371 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5372 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5373 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5374 VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));
5375
5376 case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
5377 case SVM_EXIT_SHUTDOWN: VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));
5378
5379 case SVM_EXIT_SMI:
5380 case SVM_EXIT_INIT:
5381 {
5382 /*
5383                  * We don't intercept SMIs.  As for INIT signals, they really shouldn't ever happen here.
5384                  * If one ever does, we want to know about it, so log the exit code and bail.
5385 */
5386 VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
5387 }
5388
5389#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5390 case SVM_EXIT_CLGI: VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
5391 case SVM_EXIT_STGI: VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
5392 case SVM_EXIT_VMLOAD: VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
5393 case SVM_EXIT_VMSAVE: VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
5394 case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
5395 case SVM_EXIT_VMRUN: VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
5396#else
5397 case SVM_EXIT_CLGI:
5398 case SVM_EXIT_STGI:
5399 case SVM_EXIT_VMLOAD:
5400 case SVM_EXIT_VMSAVE:
5401 case SVM_EXIT_INVLPGA:
5402 case SVM_EXIT_VMRUN:
5403#endif
5404 case SVM_EXIT_RSM:
5405 case SVM_EXIT_SKINIT:
5406 {
5407 hmR0SvmSetPendingXcptUD(pVCpu);
5408 return VINF_SUCCESS;
5409 }
5410
5411 /*
5412                  * The remaining exits should only be possible when debugging or dtracing.
5413 */
5414 case SVM_EXIT_XCPT_DE:
5415 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5416 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5417 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5418 case SVM_EXIT_XCPT_OF:
5419 case SVM_EXIT_XCPT_BR:
5420 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5421 case SVM_EXIT_XCPT_NM:
5422 case SVM_EXIT_XCPT_DF:
5423 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5424 case SVM_EXIT_XCPT_TS:
5425 case SVM_EXIT_XCPT_NP:
5426 case SVM_EXIT_XCPT_SS:
5427 /* SVM_EXIT_XCPT_GP: */ /* Handled above. */
5428 /* SVM_EXIT_XCPT_PF: */
5429 case SVM_EXIT_XCPT_15: /* Reserved. */
5430 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5431 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5432 case SVM_EXIT_XCPT_MC:
5433 case SVM_EXIT_XCPT_XF:
5434 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5435 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5436 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5437 VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
5438
5439 case SVM_EXIT_SWINT: VMEXIT_CALL_RET(0, hmR0SvmExitSwInt(pVCpu, pSvmTransient));
5440 case SVM_EXIT_TR_READ: VMEXIT_CALL_RET(0, hmR0SvmExitTrRead(pVCpu, pSvmTransient));
5441 case SVM_EXIT_TR_WRITE: VMEXIT_CALL_RET(0, hmR0SvmExitTrWrite(pVCpu, pSvmTransient)); /* Also OS/2 TLB workaround. */
5442
5443 default:
5444 {
5445 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
5446 pVCpu->hm.s.u32HMError = uExitCode;
5447 return VERR_SVM_UNKNOWN_EXIT;
5448 }
5449 }
5450 }
5451 }
5452 /* not reached */
5453}
5454
5455
5456/** @name Execution loop for single stepping, DBGF events and expensive Dtrace probes.
5457 *
5458 * The following few functions and associated structure contain the bloat
5459 * necessary for providing detailed debug events and dtrace probes as well as
5460 * reliable host-side single stepping.  This works on the principle of
5461 * "subclassing" the normal execution loop and workers. We replace the loop
5462 * method completely and override selected helpers to add necessary adjustments
5463 * to their core operation.
5464 *
5465 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
5466 * any performance for debug and analysis features.
5467 *
5468 * @{
5469 */
5470
5471/**
5472 * Transient per-VCPU debug state of the VMCB and related info we save/restore in
5473 * the debug run loop.
5474 */
5475typedef struct SVMRUNDBGSTATE
5476{
5477 /** The initial SVMVMCBCTRL::u64InterceptCtrl value (helps with restore). */
5478 uint64_t bmInterceptInitial;
5479 /** The initial SVMVMCBCTRL::u32InterceptXcpt value (helps with restore). */
5480 uint32_t bmXcptInitial;
5481 /** The initial SVMVMCBCTRL::u16InterceptRdCRx value (helps with restore). */
5482 uint16_t bmInterceptRdCRxInitial;
5483 /** The initial SVMVMCBCTRL::u16InterceptWrCRx value (helps with restore). */
5484 uint16_t bmInterceptWrCRxInitial;
5485 /** The initial SVMVMCBCTRL::u16InterceptRdDRx value (helps with restore). */
5486 uint16_t bmInterceptRdDRxInitial;
5487 /** The initial SVMVMCBCTRL::u16InterceptWrDRx value (helps with restore). */
5488 uint16_t bmInterceptWrDRxInitial;
5489
5490 /** Whether we've actually modified the intercept control qword. */
5491 bool fModifiedInterceptCtrl : 1;
5492 /** Whether we've actually modified the exception bitmap. */
5493 bool fModifiedXcptBitmap : 1;
5494 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdCRx. */
5495 bool fModifiedInterceptRdCRx : 1;
5496 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrCRx. */
5497 bool fModifiedInterceptWrCRx : 1;
5498 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdDRx. */
5499 bool fModifiedInterceptRdDRx : 1;
5500 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrDRx. */
5501 bool fModifiedInterceptWrDRx : 1;
5502
5503 /** The CS we started executing with. */
5504 uint16_t uCsStart;
5505 /** The RIP we started executing at. This is for detecting that we stepped. */
5506 uint64_t uRipStart;
5507
5508 /** The sequence number of the Dtrace provider settings the state was
5509 * configured against. */
5510 uint32_t uDtraceSettingsSeqNo;
5511 /** Extra stuff we need in SVMVMCBCTRL::u32InterceptXcpt. */
5512 uint32_t bmXcptExtra;
5513 /** Extra stuff we need in SVMVMCBCTRL::u64InterceptCtrl. */
5514 uint64_t bmInterceptExtra;
5515 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdCRx. */
5516 uint16_t bmInterceptRdCRxExtra;
5517 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrCRx. */
5518 uint16_t bmInterceptWrCRxExtra;
5519 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdDRx. */
5520 uint16_t bmInterceptRdDRxExtra;
5521 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrDRx. */
5522 uint16_t bmInterceptWrDRxExtra;
5523 /** VM-exits to check (one bit per VM-exit). */
5524 uint32_t bmExitsToCheck[33];
5525} SVMRUNDBGSTATE;
5526AssertCompileMemberSize(SVMRUNDBGSTATE, bmExitsToCheck, (SVM_EXIT_MAX + 1 + 31) / 32 * 4);
5527typedef SVMRUNDBGSTATE *PSVMRUNDBGSTATE;
5528
5529
5530/**
5531 * Initializes the SVMRUNDBGSTATE structure.
5532 *
5533 * @param pVCpu The cross context virtual CPU structure of the
5534 * calling EMT.
5535 * @param pSvmTransient The SVM-transient structure.
5536 * @param pDbgState The debug state to initialize.
5537 */
5538static void hmR0SvmRunDebugStateInit(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5539{
5540 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
5541 pDbgState->bmInterceptInitial = pVmcb->ctrl.u64InterceptCtrl;
5542 pDbgState->bmXcptInitial = pVmcb->ctrl.u32InterceptXcpt;
5543 pDbgState->bmInterceptRdCRxInitial = pVmcb->ctrl.u16InterceptRdCRx;
5544 pDbgState->bmInterceptWrCRxInitial = pVmcb->ctrl.u16InterceptWrCRx;
5545 pDbgState->bmInterceptRdDRxInitial = pVmcb->ctrl.u16InterceptRdDRx;
5546 pDbgState->bmInterceptWrDRxInitial = pVmcb->ctrl.u16InterceptWrDRx;
5547
5548 pDbgState->fModifiedInterceptCtrl = false;
5549 pDbgState->fModifiedXcptBitmap = false;
5550 pDbgState->fModifiedInterceptRdCRx = false;
5551 pDbgState->fModifiedInterceptWrCRx = false;
5552 pDbgState->fModifiedInterceptRdDRx = false;
5553 pDbgState->fModifiedInterceptWrDRx = false;
5554
5555 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
5556 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
5557
5558 /* We don't really need to zero these. */
5559 pDbgState->bmInterceptExtra = 0;
5560 pDbgState->bmXcptExtra = 0;
5561 pDbgState->bmInterceptRdCRxExtra = 0;
5562 pDbgState->bmInterceptWrCRxExtra = 0;
5563 pDbgState->bmInterceptRdDRxExtra = 0;
5564 pDbgState->bmInterceptWrDRxExtra = 0;
5565}
5566
5567
5568/**
5569 * Updates the VMCB fields with changes requested by @a pDbgState.
5570 *
5571 * This is performed after hmR0SvmPreRunGuestDebugStateUpdate as well as
5572 * immediately before executing guest code, i.e. when interrupts are disabled.
5573 * We don't check status codes here as we cannot easily assert or return in the
5574 * latter case.
5575 *
5576 * @param pSvmTransient The SVM-transient structure.
5577 * @param pDbgState The debug state.
5578 */
5579static void hmR0SvmPreRunGuestDebugStateApply(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5580{
5581 /*
5582     * Ensure desired flags in the VMCB control fields are set.
5583 */
5584 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
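    /* Each helper below ORs in the requested extra intercepts and, if anything actually changed,
       clears HMSVM_VMCB_CLEAN_INTERCEPTS so the CPU reloads the intercept fields on the next VMRUN. */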
5585#define ADD_EXTRA_INTERCEPTS(a_VmcbCtrlField, a_bmExtra, a_fModified) do { \
5586 if ((pVmcb->ctrl. a_VmcbCtrlField & (a_bmExtra)) != (a_bmExtra)) \
5587 { \
5588 pVmcb->ctrl. a_VmcbCtrlField |= (a_bmExtra); \
5589 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5590 Log6Func((#a_VmcbCtrlField ": %#RX64\n", pVmcb->ctrl. a_VmcbCtrlField)); \
5591 (a_fModified) = true; \
5592 } \
5593 } while (0)
5594 ADD_EXTRA_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptExtra, pDbgState->fModifiedInterceptCtrl);
5595 ADD_EXTRA_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptExtra, pDbgState->fModifiedXcptBitmap);
5596 ADD_EXTRA_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxExtra, pDbgState->fModifiedInterceptRdCRx);
5597 ADD_EXTRA_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxExtra, pDbgState->fModifiedInterceptWrCRx);
5598 ADD_EXTRA_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxExtra, pDbgState->fModifiedInterceptRdDRx);
5599 ADD_EXTRA_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxExtra, pDbgState->fModifiedInterceptWrDRx);
5600#undef ADD_EXTRA_INTERCEPTS
5601}
5602
5603
5604/**
5605 * Restores VMCB fields that were changed by hmR0SvmPreRunGuestDebugStateApply
5606 * for re-entry next time around.
5607 *
5608 * @param pSvmTransient The SVM-transient structure.
5609 * @param pDbgState The debug state.
5610 */
5611static void hmR0SvmRunDebugStateRevert(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5612{
5613 /*
5614     * Restore the VMCB intercept settings as we may not re-enter this function the
5615 * next time around.
5616 */
5617 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
5618
5619#define RESTORE_INTERCEPTS(a_VmcbCtrlField, a_bmInitial, a_fModified) do { \
5620 if ((a_fModified)) \
5621 { \
5622 pVmcb->ctrl. a_VmcbCtrlField = (a_bmInitial); \
5623 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5624 } \
5625 } while (0)
5626 RESTORE_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptInitial, pDbgState->fModifiedInterceptCtrl);
5627 RESTORE_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptInitial, pDbgState->fModifiedXcptBitmap);
5628 RESTORE_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxInitial, pDbgState->fModifiedInterceptRdCRx);
5629 RESTORE_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxInitial, pDbgState->fModifiedInterceptWrCRx);
5630 RESTORE_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxInitial, pDbgState->fModifiedInterceptRdDRx);
5631 RESTORE_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxInitial, pDbgState->fModifiedInterceptWrDRx);
5632#undef RESTORE_INTERCEPTS
5633}
5634
5635
5636/**
5637 * Configures VM-exit controls for current DBGF and DTrace settings.
5638 *
5639 * This updates @a pDbgState and the VMCB execution control fields (in the debug
5640 * state) to reflect the necessary VM-exits demanded by DBGF and DTrace.
5641 *
5642 * @param pVCpu The cross context virtual CPU structure.
5643 * @param pSvmTransient The SVM-transient structure. May update
5644 * fUpdatedTscOffsettingAndPreemptTimer.
5645 * @param pDbgState The debug state.
5646 */
5647static void hmR0SvmPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5648{
5649 /*
5650 * Take down the dtrace serial number so we can spot changes.
5651 */
5652 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
5653 ASMCompilerBarrier();
5654
5655 /*
5656 * Clear data members that we'll be rebuilding here.
5657 */
5658 pDbgState->bmXcptExtra = 0;
5659 pDbgState->bmInterceptExtra = 0;
5660 pDbgState->bmInterceptRdCRxExtra = 0;
5661 pDbgState->bmInterceptWrCRxExtra = 0;
5662 pDbgState->bmInterceptRdDRxExtra = 0;
5663 pDbgState->bmInterceptWrDRxExtra = 0;
5664 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
5665 pDbgState->bmExitsToCheck[i] = 0;
5666
5667 /*
5668 * Software interrupts (INT XXh)
5669 */
5670 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5671 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
5672 || VBOXVMM_INT_SOFTWARE_ENABLED())
5673 {
5674 pDbgState->bmInterceptExtra |= SVM_CTRL_INTERCEPT_INTN;
5675 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SWINT);
5676 }
5677
5678 /*
5679 * INT3 breakpoints - triggered by #BP exceptions.
5680 */
5681 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
5682 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
5683
5684 /*
5685 * Exception bitmap and XCPT events+probes.
5686 */
5687#define SET_XCPT(a_iXcpt) do { \
5688 pDbgState->bmXcptExtra |= RT_BIT_32(a_iXcpt); \
5689 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_XCPT_0 + (a_iXcpt)); \
5690 } while (0)
5691
5692 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
5693 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
5694 SET_XCPT(iXcpt);
5695
5696 if (VBOXVMM_XCPT_DE_ENABLED()) SET_XCPT(X86_XCPT_DE);
5697 if (VBOXVMM_XCPT_DB_ENABLED()) SET_XCPT(X86_XCPT_DB);
5698 if (VBOXVMM_XCPT_BP_ENABLED()) SET_XCPT(X86_XCPT_BP);
5699 if (VBOXVMM_XCPT_OF_ENABLED()) SET_XCPT(X86_XCPT_OF);
5700 if (VBOXVMM_XCPT_BR_ENABLED()) SET_XCPT(X86_XCPT_BR);
5701 if (VBOXVMM_XCPT_UD_ENABLED()) SET_XCPT(X86_XCPT_UD);
5702 if (VBOXVMM_XCPT_NM_ENABLED()) SET_XCPT(X86_XCPT_NM);
5703 if (VBOXVMM_XCPT_DF_ENABLED()) SET_XCPT(X86_XCPT_DF);
5704 if (VBOXVMM_XCPT_TS_ENABLED()) SET_XCPT(X86_XCPT_TS);
5705 if (VBOXVMM_XCPT_NP_ENABLED()) SET_XCPT(X86_XCPT_NP);
5706 if (VBOXVMM_XCPT_SS_ENABLED()) SET_XCPT(X86_XCPT_SS);
5707 if (VBOXVMM_XCPT_GP_ENABLED()) SET_XCPT(X86_XCPT_GP);
5708 if (VBOXVMM_XCPT_PF_ENABLED()) SET_XCPT(X86_XCPT_PF);
5709 if (VBOXVMM_XCPT_MF_ENABLED()) SET_XCPT(X86_XCPT_MF);
5710 if (VBOXVMM_XCPT_AC_ENABLED()) SET_XCPT(X86_XCPT_AC);
5711 if (VBOXVMM_XCPT_XF_ENABLED()) SET_XCPT(X86_XCPT_XF);
5712 if (VBOXVMM_XCPT_VE_ENABLED()) SET_XCPT(X86_XCPT_VE);
5713 if (VBOXVMM_XCPT_SX_ENABLED()) SET_XCPT(X86_XCPT_SX);
5714
5715#undef SET_XCPT
5716
5717 /*
5718 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
5719 *
5720 * Note! This is the reverse of what hmR0SvmHandleExitDtraceEvents does.
5721 * So, when adding/changing/removing please don't forget to update it.
5722 *
5723 * Some of the macros are picking up local variables to save horizontal space,
5724 * (being able to see it in a table is the lesser evil here).
5725 */
5726#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
5727 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
5728 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
5729#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
5730 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5731 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5732 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5733 } else do { } while (0)
5734#define SET_INCP_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fInterceptCtrl) \
5735 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5736 { \
5737 (pDbgState)->bmInterceptExtra |= (a_fInterceptCtrl); \
5738 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5739 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5740 } else do { } while (0)
5741
5742 /** @todo double check these */
5743 /** @todo Check what more AMD-V specific we can intercept. */
5744 //SET_INCP_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH, SVM_CTRL_INTERCEPT_TASK_SWITCH);
5745 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH);
5746 SET_INCP_XBM_IF_EITHER_EN(INSTR_VMM_CALL, SVM_EXIT_VMMCALL, SVM_CTRL_INTERCEPT_VMMCALL);
5747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, SVM_EXIT_VMMCALL);
5748 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMRUN, SVM_EXIT_VMRUN, SVM_CTRL_INTERCEPT_VMRUN);
5749 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMRUN, SVM_EXIT_VMRUN);
5750 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMLOAD, SVM_EXIT_VMLOAD, SVM_CTRL_INTERCEPT_VMLOAD);
5751 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMLOAD, SVM_EXIT_VMLOAD);
5752 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMSAVE, SVM_EXIT_VMSAVE, SVM_CTRL_INTERCEPT_VMSAVE);
5753 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMSAVE, SVM_EXIT_VMSAVE);
5754 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_STGI, SVM_EXIT_STGI, SVM_CTRL_INTERCEPT_STGI);
5755 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_STGI, SVM_EXIT_STGI);
5756 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_CLGI, SVM_EXIT_CLGI, SVM_CTRL_INTERCEPT_CLGI);
5757 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_CLGI, SVM_EXIT_CLGI);
5758
5759 SET_INCP_XBM_IF_EITHER_EN(INSTR_CPUID, SVM_EXIT_CPUID, SVM_CTRL_INTERCEPT_CPUID);
5760 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, SVM_EXIT_CPUID);
5761 SET_INCP_XBM_IF_EITHER_EN(INSTR_HALT, SVM_EXIT_HLT, SVM_CTRL_INTERCEPT_HLT);
5762 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, SVM_EXIT_HLT);
5763 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVD, SVM_EXIT_INVD, SVM_CTRL_INTERCEPT_INVD);
5764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, SVM_EXIT_INVD);
5765 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVLPG, SVM_EXIT_INVLPG, SVM_CTRL_INTERCEPT_INVLPG);
5766 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, SVM_EXIT_INVLPG);
5767 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDPMC, SVM_EXIT_RDPMC, SVM_CTRL_INTERCEPT_RDPMC);
5768 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, SVM_EXIT_RDPMC);
5769 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSC, SVM_EXIT_RDTSC, SVM_CTRL_INTERCEPT_RDTSC);
5770 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, SVM_EXIT_RDTSC);
5771 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSCP, SVM_EXIT_RDTSCP, SVM_CTRL_INTERCEPT_RDTSCP);
5772 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, SVM_EXIT_RDTSCP);
5773 SET_INCP_XBM_IF_EITHER_EN(INSTR_RSM, SVM_EXIT_RSM, SVM_CTRL_INTERCEPT_RSM);
5774 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, SVM_EXIT_RSM);
5775
5776 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
5777 pDbgState->bmInterceptRdCRxExtra = 0xffff;
5778 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_CRX_READ))
5779 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_CR0, SVM_EXIT_READ_CR15 + 1);
5780
5781 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
5782 pDbgState->bmInterceptWrCRxExtra = 0xffff;
5783 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
5784 {
5785 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_CR0, SVM_EXIT_WRITE_CR15 + 1);
5786 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_CR0_SEL_WRITE);
5787 }
5788
5789 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ))
5790 pDbgState->bmInterceptRdDRxExtra = 0xffff;
5791 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_DRX_READ))
5792 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_DR0, SVM_EXIT_READ_DR15 + 1);
5793
5794 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
5795 pDbgState->bmInterceptWrDRxExtra = 0xffff;
5796 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_DRX_WRITE))
5797 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_DR0, SVM_EXIT_WRITE_DR15 + 1);
5798
5799 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RDMSR, SVM_EXIT_MSR); /** @todo modify bitmap to intercept almost everything? (Clearing MSR_PROT just means no intercepts.) */
5800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, SVM_EXIT_MSR);
5801 SET_ONLY_XBM_IF_EITHER_EN(INSTR_WRMSR, SVM_EXIT_MSR); /** @todo ditto */
5802 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, SVM_EXIT_MSR);
5803 SET_INCP_XBM_IF_EITHER_EN(INSTR_MWAIT, SVM_EXIT_MWAIT, SVM_CTRL_INTERCEPT_MWAIT);
5804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, SVM_EXIT_MWAIT);
5805 if (ASMBitTest(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT))
5806 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT_ARMED);
5807 SET_INCP_XBM_IF_EITHER_EN(INSTR_MONITOR, SVM_EXIT_MONITOR, SVM_CTRL_INTERCEPT_MONITOR);
5808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, SVM_EXIT_MONITOR);
5809 SET_INCP_XBM_IF_EITHER_EN(INSTR_PAUSE, SVM_EXIT_PAUSE, SVM_CTRL_INTERCEPT_PAUSE);
5810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, SVM_EXIT_PAUSE);
5811 SET_INCP_XBM_IF_EITHER_EN(INSTR_SIDT, SVM_EXIT_IDTR_READ, SVM_CTRL_INTERCEPT_IDTR_READS);
5812 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, SVM_EXIT_IDTR_READ);
5813 SET_INCP_XBM_IF_EITHER_EN(INSTR_LIDT, SVM_EXIT_IDTR_WRITE, SVM_CTRL_INTERCEPT_IDTR_WRITES);
5814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, SVM_EXIT_IDTR_WRITE);
5815 SET_INCP_XBM_IF_EITHER_EN(INSTR_SGDT, SVM_EXIT_GDTR_READ, SVM_CTRL_INTERCEPT_GDTR_READS);
5816 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, SVM_EXIT_GDTR_READ);
5817 SET_INCP_XBM_IF_EITHER_EN(INSTR_LGDT, SVM_EXIT_GDTR_WRITE, SVM_CTRL_INTERCEPT_GDTR_WRITES);
5818 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, SVM_EXIT_GDTR_WRITE);
5819 SET_INCP_XBM_IF_EITHER_EN(INSTR_SLDT, SVM_EXIT_LDTR_READ, SVM_CTRL_INTERCEPT_LDTR_READS);
5820 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, SVM_EXIT_LDTR_READ);
5821 SET_INCP_XBM_IF_EITHER_EN(INSTR_LLDT, SVM_EXIT_LDTR_WRITE, SVM_CTRL_INTERCEPT_LDTR_WRITES);
5822 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, SVM_EXIT_LDTR_WRITE);
5823 SET_INCP_XBM_IF_EITHER_EN(INSTR_STR, SVM_EXIT_TR_READ, SVM_CTRL_INTERCEPT_TR_READS);
5824 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, SVM_EXIT_TR_READ);
5825 SET_INCP_XBM_IF_EITHER_EN(INSTR_LTR, SVM_EXIT_TR_WRITE, SVM_CTRL_INTERCEPT_TR_WRITES);
5826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, SVM_EXIT_TR_WRITE);
5827 SET_INCP_XBM_IF_EITHER_EN(INSTR_WBINVD, SVM_EXIT_WBINVD, SVM_CTRL_INTERCEPT_WBINVD);
5828 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, SVM_EXIT_WBINVD);
5829 SET_INCP_XBM_IF_EITHER_EN(INSTR_XSETBV, SVM_EXIT_XSETBV, SVM_CTRL_INTERCEPT_XSETBV);
5830 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, SVM_EXIT_XSETBV);
5831
5832 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_TRIPLE_FAULT))
5833 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SHUTDOWN);
5834
5835#undef IS_EITHER_ENABLED
5836#undef SET_ONLY_XBM_IF_EITHER_EN
5837#undef SET_INCP_XBM_IF_EITHER_EN
5838
5839 /*
5840 * Sanitize the control stuff.
5841 */
5842 /** @todo filter out unsupported stuff? */
5843 if ( pVCpu->hmr0.s.fDebugWantRdTscExit
5844 != RT_BOOL(pDbgState->bmInterceptExtra & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
5845 {
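        /* Bring fDebugWantRdTscExit back in sync with the RDTSC/RDTSCP intercepts the debug state now forces. */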
5846 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
5847 /// @todo pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
5848 RT_NOREF(pSvmTransient);
5849 }
5850
5851 Log6(("HM: debug state: bmInterceptExtra=%#RX64 bmXcptExtra=%#RX32%s%s%s%s bmExitsToCheck=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
5852 pDbgState->bmInterceptExtra, pDbgState->bmXcptExtra,
5853 pDbgState->bmInterceptRdCRxExtra ? " rd-cr" : "",
5854 pDbgState->bmInterceptWrCRxExtra ? " wr-cr" : "",
5855 pDbgState->bmInterceptRdDRxExtra ? " rd-dr" : "",
5856 pDbgState->bmInterceptWrDRxExtra ? " wr-dr" : "",
5857 pDbgState->bmExitsToCheck[0],
5858 pDbgState->bmExitsToCheck[1],
5859 pDbgState->bmExitsToCheck[2],
5860 pDbgState->bmExitsToCheck[3],
5861 pDbgState->bmExitsToCheck[4]));
5862}
5863
5864
5865/**
5866 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
5867 * appropriate.
5868 *
5869 * The caller has checked the VM-exit against the SVMRUNDBGSTATE::bmExitsToCheck
5870 * bitmap.
5871 *
5872 * @returns Strict VBox status code (i.e. informational status codes too).
5873 * @param pVCpu The cross context virtual CPU structure.
5874 * @param pSvmTransient The SVM-transient structure.
5875 * @param uExitCode The VM-exit code.
5876 *
5877 * @remarks The name of this function is displayed by dtrace, so keep it short
5878 * and to the point. No longer than 33 chars long, please.
5879 */
5880static VBOXSTRICTRC hmR0SvmHandleExitDtraceEvents(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, uint64_t uExitCode)
5881{
5882 /*
5883 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
5884 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
5885 *
5886 * Note! This is the reverse operation of what hmR0SvmPreRunGuestDebugStateUpdate
5887 * does. Must add/change/remove both places. Same ordering, please.
5888 *
5889 * Added/removed events must also be reflected in the next section
5890 * where we dispatch dtrace events.
5891 */
5892 bool fDtrace1 = false;
5893 bool fDtrace2 = false;
5894 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
5895 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
5896 uint64_t uEventArg = 0;
5897#define SET_XCPT(a_XcptName) \
5898 do { \
5899 enmEvent2 = RT_CONCAT(DBGFEVENT_XCPT_, a_XcptName); \
5900 fDtrace2 = RT_CONCAT3(VBOXVMM_XCPT_, a_XcptName, _ENABLED)(); \
5901 } while (0)
5902#define SET_EXIT(a_EventSubName) \
5903 do { \
5904 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5905 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5906 } while (0)
5907#define SET_BOTH(a_EventSubName) \
5908 do { \
5909 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
5910 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5911 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
5912 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5913 } while (0)
5914 switch (uExitCode)
5915 {
5916 case SVM_EXIT_SWINT:
5917 enmEvent2 = DBGFEVENT_INTERRUPT_SOFTWARE;
5918 fDtrace2 = VBOXVMM_INT_SOFTWARE_ENABLED();
5919 uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1;
5920 break;
5921
5922 case SVM_EXIT_XCPT_DE: SET_XCPT(DE); break;
5923 case SVM_EXIT_XCPT_DB: SET_XCPT(DB); break;
5924 case SVM_EXIT_XCPT_BP: SET_XCPT(BP); break;
5925 case SVM_EXIT_XCPT_OF: SET_XCPT(OF); break;
5926 case SVM_EXIT_XCPT_BR: SET_XCPT(BR); break;
5927 case SVM_EXIT_XCPT_UD: SET_XCPT(UD); break;
5928 case SVM_EXIT_XCPT_NM: SET_XCPT(NM); break;
5929 case SVM_EXIT_XCPT_DF: SET_XCPT(DF); break;
5930 case SVM_EXIT_XCPT_TS: SET_XCPT(TS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5931 case SVM_EXIT_XCPT_NP: SET_XCPT(NP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5932 case SVM_EXIT_XCPT_SS: SET_XCPT(SS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5933 case SVM_EXIT_XCPT_GP: SET_XCPT(GP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5934 case SVM_EXIT_XCPT_PF: SET_XCPT(PF); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5935 case SVM_EXIT_XCPT_MF: SET_XCPT(MF); break;
5936 case SVM_EXIT_XCPT_AC: SET_XCPT(AC); break;
5937 case SVM_EXIT_XCPT_XF: SET_XCPT(XF); break;
5938 case SVM_EXIT_XCPT_VE: SET_XCPT(VE); break;
5939 case SVM_EXIT_XCPT_SX: SET_XCPT(SX); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5940
5941 case SVM_EXIT_XCPT_2: enmEvent2 = DBGFEVENT_XCPT_02; break;
5942 case SVM_EXIT_XCPT_9: enmEvent2 = DBGFEVENT_XCPT_09; break;
5943 case SVM_EXIT_XCPT_15: enmEvent2 = DBGFEVENT_XCPT_0f; break;
5944 case SVM_EXIT_XCPT_18: enmEvent2 = DBGFEVENT_XCPT_MC; break;
5945 case SVM_EXIT_XCPT_21: enmEvent2 = DBGFEVENT_XCPT_15; break;
5946 case SVM_EXIT_XCPT_22: enmEvent2 = DBGFEVENT_XCPT_16; break;
5947 case SVM_EXIT_XCPT_23: enmEvent2 = DBGFEVENT_XCPT_17; break;
5948 case SVM_EXIT_XCPT_24: enmEvent2 = DBGFEVENT_XCPT_18; break;
5949 case SVM_EXIT_XCPT_25: enmEvent2 = DBGFEVENT_XCPT_19; break;
5950 case SVM_EXIT_XCPT_26: enmEvent2 = DBGFEVENT_XCPT_1a; break;
5951 case SVM_EXIT_XCPT_27: enmEvent2 = DBGFEVENT_XCPT_1b; break;
5952 case SVM_EXIT_XCPT_28: enmEvent2 = DBGFEVENT_XCPT_1c; break;
5953 case SVM_EXIT_XCPT_29: enmEvent2 = DBGFEVENT_XCPT_1d; break;
5954 case SVM_EXIT_XCPT_31: enmEvent2 = DBGFEVENT_XCPT_1f; break;
5955
5956 case SVM_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
5957 case SVM_EXIT_VMMCALL: SET_BOTH(VMM_CALL); break;
5958 case SVM_EXIT_VMRUN: SET_BOTH(SVM_VMRUN); break;
5959 case SVM_EXIT_VMLOAD: SET_BOTH(SVM_VMLOAD); break;
5960 case SVM_EXIT_VMSAVE: SET_BOTH(SVM_VMSAVE); break;
5961 case SVM_EXIT_STGI: SET_BOTH(SVM_STGI); break;
5962 case SVM_EXIT_CLGI: SET_BOTH(SVM_CLGI); break;
5963 case SVM_EXIT_CPUID: SET_BOTH(CPUID); break;
5964 case SVM_EXIT_HLT: SET_BOTH(HALT); break;
5965 case SVM_EXIT_INVD: SET_BOTH(INVD); break;
5966 case SVM_EXIT_INVLPG: SET_BOTH(INVLPG); break;
5967 case SVM_EXIT_RDPMC: SET_BOTH(RDPMC); break;
5968 case SVM_EXIT_RDTSC: SET_BOTH(RDTSC); break;
5969 case SVM_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
5970 case SVM_EXIT_RSM: SET_BOTH(RSM); break;
5971
5972 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
5973 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
5974 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
5975 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
5976 SET_BOTH(CRX_READ);
5977 uEventArg = uExitCode - SVM_EXIT_READ_CR0;
5978 break;
5979 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
5980 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
5981 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
5982 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
5983 case SVM_EXIT_CR0_SEL_WRITE:
5984 SET_BOTH(CRX_WRITE);
5985 uEventArg = uExitCode - SVM_EXIT_WRITE_CR0;
5986 break;
5987 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5988 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
5989 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
5990 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5991 SET_BOTH(DRX_READ);
5992 uEventArg = uExitCode - SVM_EXIT_READ_DR0;
5993 break;
5994 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5995 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
5996 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
5997 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5998 SET_BOTH(DRX_WRITE);
5999 uEventArg = uExitCode - SVM_EXIT_WRITE_DR0;
6000 break;
6001 case SVM_EXIT_MSR:
6002 if (pSvmTransient->pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
6003 SET_BOTH(WRMSR);
6004 else
6005 SET_BOTH(RDMSR);
6006 break;
6007 case SVM_EXIT_MWAIT_ARMED:
6008 case SVM_EXIT_MWAIT: SET_BOTH(MWAIT); break;
6009 case SVM_EXIT_MONITOR: SET_BOTH(MONITOR); break;
6010 case SVM_EXIT_PAUSE: SET_BOTH(PAUSE); break;
6011 case SVM_EXIT_IDTR_READ: SET_BOTH(SIDT); break;
6012 case SVM_EXIT_IDTR_WRITE: SET_BOTH(LIDT); break;
6013 case SVM_EXIT_GDTR_READ: SET_BOTH(SGDT); break;
6014 case SVM_EXIT_GDTR_WRITE: SET_BOTH(LGDT); break;
6015 case SVM_EXIT_LDTR_READ: SET_BOTH(SLDT); break;
6016 case SVM_EXIT_LDTR_WRITE: SET_BOTH(LLDT); break;
6017 case SVM_EXIT_TR_READ: SET_BOTH(STR); break;
6018 case SVM_EXIT_TR_WRITE: SET_BOTH(LTR); break;
6019 case SVM_EXIT_WBINVD: SET_BOTH(WBINVD); break;
6020 case SVM_EXIT_XSETBV: SET_BOTH(XSETBV); break;
6021
6022 case SVM_EXIT_SHUTDOWN:
6023 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
6024 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
6025 break;
6026
6027 default:
6028 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6029 break;
6030 }
6031#undef SET_BOTH
6032#undef SET_EXIT
#undef SET_XCPT
6033
6034 /*
6035 * Dtrace tracepoints go first. We do them all here at once so we don't
6036 * have to duplicate the guest-state saving and related code a few dozen times.
6037 * The downside is that we've got to repeat the switch, though this time
6038 * we use enmEvent since the probes are a subset of what DBGF does.
6039 */
6040 if (fDtrace1 || fDtrace2)
6041 {
6042 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6043 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
6044 switch (enmEvent1)
6045 {
6046 /** @todo consider which extra parameters would be helpful for each probe. */
6047 case DBGFEVENT_END: break;
6048 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6049 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
6050 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
6051 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
6052 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
6053 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
6054 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
6055 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
6056 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
6057 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6058 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6059 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6060 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6061 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, (uint32_t)uEventArg, pCtx->cr2); break;
6062 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
6063 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
6064 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
6065 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
6066 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, (uint32_t)uEventArg); break;
6067 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6068 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
6069 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
6070 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
6071 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
6072 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
6073 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
6074 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6075 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6076 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6077 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6078 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6079 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
6080 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6081 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
6082 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
6083 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
6084 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
6085 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
6086 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
6087 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
6088 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
6089 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
6090 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
6091 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
6092 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
6093 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
6094 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
6095 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
6096 case DBGFEVENT_INSTR_SVM_VMRUN: VBOXVMM_INSTR_SVM_VMRUN(pVCpu, pCtx); break;
6097 case DBGFEVENT_INSTR_SVM_VMLOAD: VBOXVMM_INSTR_SVM_VMLOAD(pVCpu, pCtx); break;
6098 case DBGFEVENT_INSTR_SVM_VMSAVE: VBOXVMM_INSTR_SVM_VMSAVE(pVCpu, pCtx); break;
6099 case DBGFEVENT_INSTR_SVM_STGI: VBOXVMM_INSTR_SVM_STGI(pVCpu, pCtx); break;
6100 case DBGFEVENT_INSTR_SVM_CLGI: VBOXVMM_INSTR_SVM_CLGI(pVCpu, pCtx); break;
6101 default: AssertMsgFailed(("enmEvent1=%d uExitCode=%d\n", enmEvent1, uExitCode)); break;
6102 }
6103 switch (enmEvent2)
6104 {
6105 /** @todo consider which extra parameters would be helpful for each probe. */
6106 case DBGFEVENT_END: break;
6107 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
6108 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6109 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
6110 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
6111 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
6112 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
6113 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
6114 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
6115 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6116 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6117 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6118 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6119 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6120 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
6121 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6122 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
6123 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
6124 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
6125 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
6126 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
6127 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
6128 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
6129 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
6130 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
6131 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
6132 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
6133 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
6134 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
6135 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
6136 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
6137 case DBGFEVENT_EXIT_SVM_VMRUN: VBOXVMM_EXIT_SVM_VMRUN(pVCpu, pCtx); break;
6138 case DBGFEVENT_EXIT_SVM_VMLOAD: VBOXVMM_EXIT_SVM_VMLOAD(pVCpu, pCtx); break;
6139 case DBGFEVENT_EXIT_SVM_VMSAVE: VBOXVMM_EXIT_SVM_VMSAVE(pVCpu, pCtx); break;
6140 case DBGFEVENT_EXIT_SVM_STGI: VBOXVMM_EXIT_SVM_STGI(pVCpu, pCtx); break;
6141 case DBGFEVENT_EXIT_SVM_CLGI: VBOXVMM_EXIT_SVM_CLGI(pVCpu, pCtx); break;
6142 default: AssertMsgFailed(("enmEvent2=%d uExitCode=%d\n", enmEvent2, uExitCode)); break;
6143 }
6144 }
6145
6146 /*
6147 * Fire off the DBGF event, if enabled (our check here is just a quick one,
6148 * the DBGF call will do a full check).
6149 *
6150 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
6151 * Note! If we have two events, we prioritize the first, i.e. the instruction
6152 * one, in order to avoid event nesting.
6153 */
6154 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6155 VBOXSTRICTRC rcStrict;
6156 if ( enmEvent1 != DBGFEVENT_END
6157 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
6158 {
6159 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6160 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
6161 }
6162 else if ( enmEvent2 != DBGFEVENT_END
6163 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
6164 {
6165 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6166 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
6167 }
6168 else
6169 rcStrict = VINF_SUCCESS;
6170 return rcStrict;
6171}
6172
6173
6174/**
6175 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID),
6176 * debug variant.
6177 *
6178 * @returns Strict VBox status code (informational status codes included).
6179 * @param pVCpu The cross context virtual CPU structure.
6180 * @param pSvmTransient Pointer to the SVM transient structure.
6181 * @param pDbgState The runtime debug state.
6182 */
6183static VBOXSTRICTRC hmR0SvmDebugHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
6184{
6185 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
6186 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
6187
6188 /*
6189 * Expensive (saves context) generic dtrace VM-exit probe.
6190 */
6191 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
6192 if (!VBOXVMM_R0_HMSVM_VMEXIT_ENABLED())
6193 { /* more likely */ }
6194 else
6195 {
6196 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6197 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, uExitCode, pSvmTransient->pVmcb);
6198 }
6199
6200 /*
6201 * Check for single stepping event if we're stepping.
6202 */
6203 if (pVCpu->hm.s.fSingleInstruction)
6204 {
6205 switch (uExitCode)
6206 {
6207 /* Various events: */
6208 case SVM_EXIT_XCPT_0: case SVM_EXIT_XCPT_1: case SVM_EXIT_XCPT_2: case SVM_EXIT_XCPT_3:
6209 case SVM_EXIT_XCPT_4: case SVM_EXIT_XCPT_5: case SVM_EXIT_XCPT_6: case SVM_EXIT_XCPT_7:
6210 case SVM_EXIT_XCPT_8: case SVM_EXIT_XCPT_9: case SVM_EXIT_XCPT_10: case SVM_EXIT_XCPT_11:
6211 case SVM_EXIT_XCPT_12: case SVM_EXIT_XCPT_13: case SVM_EXIT_XCPT_14: case SVM_EXIT_XCPT_15:
6212 case SVM_EXIT_XCPT_16: case SVM_EXIT_XCPT_17: case SVM_EXIT_XCPT_18: case SVM_EXIT_XCPT_19:
6213 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
6214 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
6215 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
6216 case SVM_EXIT_INTR:
6217 case SVM_EXIT_NMI:
6218 case SVM_EXIT_VINTR:
6219 case SVM_EXIT_NPF:
6220 case SVM_EXIT_AVIC_NOACCEL:
6221
6222 /* Instruction specific VM-exits: */
6223 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
6224 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
6225 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
6226 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
6227 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
6228 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
6229 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
6230 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
6231 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
6232 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
6233 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
6234 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
6235 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
6236 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
6237 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
6238 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
6239 case SVM_EXIT_CR0_SEL_WRITE:
6240 case SVM_EXIT_IDTR_READ:
6241 case SVM_EXIT_GDTR_READ:
6242 case SVM_EXIT_LDTR_READ:
6243 case SVM_EXIT_TR_READ:
6244 case SVM_EXIT_IDTR_WRITE:
6245 case SVM_EXIT_GDTR_WRITE:
6246 case SVM_EXIT_LDTR_WRITE:
6247 case SVM_EXIT_TR_WRITE:
6248 case SVM_EXIT_RDTSC:
6249 case SVM_EXIT_RDPMC:
6250 case SVM_EXIT_PUSHF:
6251 case SVM_EXIT_POPF:
6252 case SVM_EXIT_CPUID:
6253 case SVM_EXIT_RSM:
6254 case SVM_EXIT_IRET:
6255 case SVM_EXIT_SWINT:
6256 case SVM_EXIT_INVD:
6257 case SVM_EXIT_PAUSE:
6258 case SVM_EXIT_HLT:
6259 case SVM_EXIT_INVLPG:
6260 case SVM_EXIT_INVLPGA:
6261 case SVM_EXIT_IOIO:
6262 case SVM_EXIT_MSR:
6263 case SVM_EXIT_TASK_SWITCH:
6264 case SVM_EXIT_VMRUN:
6265 case SVM_EXIT_VMMCALL:
6266 case SVM_EXIT_VMLOAD:
6267 case SVM_EXIT_VMSAVE:
6268 case SVM_EXIT_STGI:
6269 case SVM_EXIT_CLGI:
6270 case SVM_EXIT_SKINIT:
6271 case SVM_EXIT_RDTSCP:
6272 case SVM_EXIT_ICEBP:
6273 case SVM_EXIT_WBINVD:
6274 case SVM_EXIT_MONITOR:
6275 case SVM_EXIT_MWAIT:
6276 case SVM_EXIT_MWAIT_ARMED:
6277 case SVM_EXIT_XSETBV:
6278 case SVM_EXIT_RDPRU:
6279 case SVM_EXIT_WRITE_EFER_TRAP:
6280 case SVM_EXIT_WRITE_CR0_TRAP: case SVM_EXIT_WRITE_CR1_TRAP: case SVM_EXIT_WRITE_CR2_TRAP: case SVM_EXIT_WRITE_CR3_TRAP:
6281 case SVM_EXIT_WRITE_CR4_TRAP: case SVM_EXIT_WRITE_CR5_TRAP: case SVM_EXIT_WRITE_CR6_TRAP: case SVM_EXIT_WRITE_CR7_TRAP:
6282 case SVM_EXIT_WRITE_CR8_TRAP: case SVM_EXIT_WRITE_CR9_TRAP: case SVM_EXIT_WRITE_CR10_TRAP: case SVM_EXIT_WRITE_CR11_TRAP:
6283 case SVM_EXIT_WRITE_CR12_TRAP: case SVM_EXIT_WRITE_CR13_TRAP: case SVM_EXIT_WRITE_CR14_TRAP: case SVM_EXIT_WRITE_CR15_TRAP:
6284 case SVM_EXIT_MCOMMIT:
6285 {
6286 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6287 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
6288 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
6289 {
6290 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6291 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode));
6292 return VINF_EM_DBG_STEPPED;
6293 }
6294 break;
6295 }
6296
6297 /* Errors and unexpected events: */
6298 case SVM_EXIT_FERR_FREEZE:
6299 case SVM_EXIT_SHUTDOWN:
6300 case SVM_EXIT_AVIC_INCOMPLETE_IPI:
6301 break;
6302
6303 case SVM_EXIT_SMI:
6304 case SVM_EXIT_INIT:
6305 default:
6306 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6307 break;
6308 }
6309 }
6310
6311 /*
6312 * Check for debugger event breakpoints and dtrace probes.
6313 */
6314 if ( uExitCode < sizeof(pDbgState->bmExitsToCheck) * 8U
6315 && ASMBitTest(pDbgState->bmExitsToCheck, uExitCode) )
6316 {
6317 VBOXSTRICTRC rcStrict = hmR0SvmHandleExitDtraceEvents(pVCpu, pSvmTransient, uExitCode);
6318 if (rcStrict != VINF_SUCCESS)
6319 {
6320 Log6Func(("%04x:%08RX64 (exit %u) -> %Rrc\n",
6321 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, VBOXSTRICTRC_VAL(rcStrict) ));
6322 return rcStrict;
6323 }
6324 }
6325
6326 /*
6327 * Normal processing.
6328 */
6329 return hmR0SvmHandleExit(pVCpu, pSvmTransient);
6330}
6331
6332
6333/**
6334 * Runs the guest code using AMD-V in single step mode.
6335 *
6336 * @returns Strict VBox status code.
6337 * @param pVCpu The cross context virtual CPU structure.
6338 * @param pcLoops Pointer to the number of executed loops.
6339 */
6340static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
6341{
6342 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
6343 Assert(pcLoops);
6344 Assert(*pcLoops <= cMaxResumeLoops);
6345
6346 SVMTRANSIENT SvmTransient;
6347 RT_ZERO(SvmTransient);
6348 SvmTransient.fUpdateTscOffsetting = true;
6349 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
6350
6351 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6352
6353 /* Set HMCPU indicators. */
6354 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
6355 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
6356 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6357 pVCpu->hmr0.s.fUsingDebugLoop = true;
6358
6359 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
6360 SVMRUNDBGSTATE DbgState;
6361 hmR0SvmRunDebugStateInit(pVCpu, &SvmTransient, &DbgState);
6362 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6363
6364 /*
6365 * The loop.
6366 */
6367 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
6368 for (;;)
6369 {
6370 Assert(!HMR0SuspendPending());
6371 AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
6372 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
6373 (unsigned)RTMpCpuId(), *pcLoops));
6374 bool fStepping = pVCpu->hm.s.fSingleInstruction;
6375
6376 /* Set up VM-execution controls the next two can respond to. */
6377 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6378
6379 /* Preparatory work for running nested-guest code, this may force us to return to
6380 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
6381 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6382 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
6383 if (rc != VINF_SUCCESS)
6384 break;
6385
6386 /*
6387 * No longjmps to ring-3 from this point on!!!
6388 *
6389 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
6390 * better than a kernel panic. This also disables flushing of the R0-logger instance.
6391 */
6392 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
6393
6394 /* Override any obnoxious code in the above two calls. */
6395 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6396#if 0
6397 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64\n",
6398 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6399 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u));
6400#endif
6401
6402 /*
6403 * Finally execute guest code.
6404 */
6405 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
6406
6407 /* Restore any residual host-state and save any bits shared between host and guest
6408 into the guest-CPU state. Re-enables interrupts! */
6409 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
6410#if 0
6411 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64 exit=%d\n",
6412 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6413 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u, SvmTransient.u64ExitCode));
6414#endif
6415
6416 if (RT_LIKELY( rc == VINF_SUCCESS /* Check for VMRUN errors. */
6417 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
6418 { /* very likely */ }
6419 else
6420 {
6421 if (rc == VINF_SUCCESS)
6422 rc = VERR_SVM_INVALID_GUEST_STATE;
6423 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
6424 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
6425 return rc;
6426 }
6427
6428 /* Handle the #VMEXIT. */
6429 HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
6430 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
6431 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
6432 rc = hmR0SvmDebugHandleExit(pVCpu, &SvmTransient, &DbgState);
6433 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
6434 if (rc != VINF_SUCCESS)
6435 break;
6436 if (++(*pcLoops) >= cMaxResumeLoops)
6437 {
6438 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
6439 rc = VINF_EM_RAW_INTERRUPT;
6440 break;
6441 }
6442
6443 /*
6444 * Stepping: Did the RIP change? If so, consider it a single step.
6445 * Otherwise, make sure one of the TFs gets set.
6446 */
6447 if (fStepping)
6448 {
6449 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6450 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
6451 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
6452 {
6453 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6454 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, SvmTransient.u64ExitCode));
6455 rc = VINF_EM_DBG_STEPPED;
6456 break;
6457 }
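            /* RIP/CS unchanged: mark DR7 as dirty so the stepping trap-flag setup is re-applied before the next VMRUN. */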
6458 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
6459 }
6460
6461 /*
6462 * Update when the dtrace settings change (DBGF kicks us, so no need to check).
6463 * Revert the state changes afterwards so we can drop intercepts that are no longer needed.
6464 */
6465 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
6466 {
6467 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6468 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6469 }
6470 }
6471
6472 /*
6473 * Clear the X86_EFL_TF if necessary.
6474 */
6475 if (pVCpu->hmr0.s.fClearTrapFlag)
6476 {
6477 pVCpu->hmr0.s.fClearTrapFlag = false;
6478 pCtx->eflags.Bits.u1TF = 0;
6479 }
6480
6481 /* Restore HMCPU indicators. */
6482 pVCpu->hmr0.s.fUsingDebugLoop = false;
6483 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6484 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
6485
6486 /* Restore all controls applied by hmR0SvmPreRunGuestDebugStateApply above. */
6487 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6488
6489 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6490 return rc;
6491}
6492
6493/** @} */
6494
6495#undef VMEXIT_CALL_RET
6496
6497
6498#ifdef VBOX_STRICT
6499 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6500# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
6501 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6502
6503# define HMSVM_ASSERT_PREEMPT_CPUID() \
6504 do \
6505 { \
6506 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6507 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6508 } while (0)
6509
6510# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6511 do { \
6512 AssertPtr((a_pVCpu)); \
6513 AssertPtr((a_pSvmTransient)); \
6514 Assert(ASMIntAreEnabled()); \
6515 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6516 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
6517 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
6518 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6519 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6520 HMSVM_ASSERT_PREEMPT_CPUID(); \
6521 } while (0)
6522#else
6523# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6524 do { \
6525 RT_NOREF2(a_pVCpu, a_pSvmTransient); \
6526 } while (0)
6527#endif
6528
6529
6530/**
6531 * Gets the IEM exception flags for the specified SVM event.
6532 *
6533 * @returns The IEM exception flags.
6534 * @param pEvent Pointer to the SVM event.
6535 *
6536 * @remarks This function currently only constructs flags required for
6537 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
6538 * and CR2 aspects of an exception are not included).
6539 */
6540static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
6541{
6542 uint8_t const uEventType = pEvent->n.u3Type;
6543 uint32_t fIemXcptFlags;
6544 switch (uEventType)
6545 {
6546 case SVM_EVENT_EXCEPTION:
6547 /*
6548 * Only INT3 and INTO instructions can raise #BP and #OF exceptions.
6549 * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
6550 */
6551 if (pEvent->n.u8Vector == X86_XCPT_BP)
6552 {
6553 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
6554 break;
6555 }
6556 if (pEvent->n.u8Vector == X86_XCPT_OF)
6557 {
6558 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
6559 break;
6560 }
6561 /** @todo How do we distinguish ICEBP \#DB from the regular one? */
6562 RT_FALL_THRU();
6563 case SVM_EVENT_NMI:
6564 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6565 break;
6566
6567 case SVM_EVENT_EXTERNAL_IRQ:
6568 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6569 break;
6570
6571 case SVM_EVENT_SOFTWARE_INT:
6572 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6573 break;
6574
6575 default:
6576 fIemXcptFlags = 0;
6577 AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
6578 break;
6579 }
6580 return fIemXcptFlags;
6581}
6582
6583
6584/**
6585 * Handle a condition that occurred while delivering an event through the guest
6586 * IDT.
6587 *
6588 * @returns VBox status code (informational error codes included).
6589 * @retval VINF_SUCCESS if we should continue handling the \#VMEXIT.
6590 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
6591 * continue execution of the guest, which will deliver the \#DF.
6592 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6593 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6594 *
6595 * @param pVCpu The cross context virtual CPU structure.
6596 * @param pSvmTransient Pointer to the SVM transient structure.
6597 *
6598 * @remarks No-long-jump zone!!!
6599 */
6600static int hmR0SvmCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6601{
6602 /** @todo r=bird: Looks like this is called on many exits and we start by
6603 * loading CR2 on the offchance that we actually have work to do here.
6604 *
6605 * HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY can surely check
6606 * pVmcb->ctrl.ExitIntInfo.n.u1Valid, can't it?
6607 *
6608 * Also, what's the deal with hmR0SvmGetCurrentVmcb() vs pSvmTransient->pVmcb?
6609 */
6610 int rc = VINF_SUCCESS;
6611 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
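    /* CR2 is imported here as it is needed below when re-injecting a prior #PF and when logging vectoring-#PF conditions. */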
6612 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
6613
6614 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
6615 pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
6616 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));
6617
6618 /*
6619 * The EXITINTINFO (if valid) contains the prior exception (IDT vector) that was trying to
6620 * be delivered to the guest which caused a #VMEXIT which was intercepted (Exit vector).
6621 *
6622 * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code".
6623 */
6624 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
6625 {
6626 IEMXCPTRAISE enmRaise;
6627 IEMXCPTRAISEINFO fRaiseInfo;
6628 bool const fExitIsHwXcpt = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31;
6629 uint8_t const uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
6630 if (fExitIsHwXcpt)
6631 {
6632 uint8_t const uExitVector = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0;
6633 uint32_t const fIdtVectorFlags = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
6634 uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6635 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6636 }
6637 else
6638 {
6639 /*
6640 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF)
6641 * then we end up here.
6642 *
6643 * If the event was:
6644 * - a software interrupt, we can re-execute the instruction which will
6645 * regenerate the event.
6646 * - an NMI, we need to clear NMI blocking and re-inject the NMI.
6647 * - a hardware exception or external interrupt, we re-inject it.
6648 */
6649 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6650 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
6651 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6652 else
6653 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6654 }
6655
6656 switch (enmRaise)
6657 {
6658 case IEMXCPTRAISE_CURRENT_XCPT:
6659 case IEMXCPTRAISE_PREV_EVENT:
6660 {
6661 /* For software interrupts, we shall re-execute the instruction. */
6662 if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
6663 {
6664 RTGCUINTPTR GCPtrFaultAddress = 0;
6665
6666 /* If we are re-injecting an NMI, clear NMI blocking. */
6667 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
6668 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
6669
6670 /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
6671 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6672 {
6673 pSvmTransient->fVectoringPF = true;
6674 Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
6675 pVCpu->cpum.GstCtx.cr2));
6676 }
6677 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
6678 && uIdtVector == X86_XCPT_PF)
6679 {
6680 /*
6681 * If the previous exception was a #PF, we need to recover the CR2 value.
6682 * This can't happen with shadow paging.
6683 */
6684 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
6685 }
6686
6687 /*
6688 * Without nested paging, when uExitVector is #PF, the CR2 value will be updated from the VMCB's
6689 * exit-info fields if it's a guest #PF; see hmR0SvmExitXcptPF().
6690 */
6691 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
6692 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
6693 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);
6694
6695 Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
6696 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
6697 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
6698 }
6699 break;
6700 }
6701
6702 case IEMXCPTRAISE_REEXEC_INSTR:
6703 {
6704 Assert(rc == VINF_SUCCESS);
6705 break;
6706 }
6707
6708 case IEMXCPTRAISE_DOUBLE_FAULT:
6709 {
6710 /*
6711 * Determine a vectoring double #PF condition. It is used later, when PGM evaluates
6712 * the second #PF as a guest #PF (and not a shadow #PF), so that it can be
6713 * converted into a #DF.
6714 */
6715 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6716 {
6717 Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
6718 pSvmTransient->fVectoringDoublePF = true;
6719 Assert(rc == VINF_SUCCESS);
6720 }
6721 else
6722 {
6723 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
6724 hmR0SvmSetPendingXcptDF(pVCpu);
6725 rc = VINF_HM_DOUBLE_FAULT;
6726 }
6727 break;
6728 }
6729
6730 case IEMXCPTRAISE_TRIPLE_FAULT:
6731 {
6732 rc = VINF_EM_RESET;
6733 break;
6734 }
6735
6736 case IEMXCPTRAISE_CPU_HANG:
6737 {
6738 rc = VERR_EM_GUEST_CPU_HANG;
6739 break;
6740 }
6741
6742 default:
6743 AssertMsgFailedBreakStmt(("Bogus enmRaise value: %d (%#x)\n", enmRaise, enmRaise), rc = VERR_SVM_IPE_2);
6744 }
6745 }
6746 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
6747 return rc;
6748}
6749
6750
6751/**
6752 * Advances the guest RIP by the number of bytes specified in @a cb.
6753 *
6754 * @param pVCpu The cross context virtual CPU structure.
6755 * @param cb RIP increment value in bytes.
6756 */
6757DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
6758{
6759 pVCpu->cpum.GstCtx.rip += cb;
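    /* Skipping an instruction consumes any pending interrupt shadow (STI / MOV SS blocking). */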
6760 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6761 /** @todo clear RF. */
6762}
6763
6764
6765/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6766/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6767/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6768
6769/** @name \#VMEXIT handlers.
6770 * @{
6771 */
6772
6773/**
6774 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
6775 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
6776 */
6777HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6778{
6779 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6780
6781 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
6782 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
6783 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
6784 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6785
6786 /*
6787 * AMD-V has no preemption timer, and the generic periodic preemption timer has no way of
6788 * telling us -before- the timer fires whether the current interrupt is our own timer or
6789 * some other host interrupt. We also cannot examine what interrupt it is until the host
6790 * actually takes the interrupt.
6791 *
6792 * Going back to executing guest code here unconditionally causes random scheduling
6793 * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host).
6794 */
6795 return VINF_EM_RAW_INTERRUPT;
6796}
6797
6798
6799/**
6800 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
6801 */
6802HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6803{
6804 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6805
6806 VBOXSTRICTRC rcStrict;
6807 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6808 if (fSupportsNextRipSave)
6809 {
6810 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6811 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
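        /* With the NRIP_SAVE feature the VMCB provides the next RIP, so the instruction length is just the delta from the current RIP. */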
6812 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6813 rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr);
6814 }
6815 else
6816 {
6817 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6818 rcStrict = IEMExecOne(pVCpu);
6819 }
6820
6821 if (rcStrict == VINF_IEM_RAISED_XCPT)
6822 {
6823 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6824 rcStrict = VINF_SUCCESS;
6825 }
6826 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6827 return rcStrict;
6828}
6829
6830
6831/**
6832 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
6833 */
6834HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6835{
6836 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6837
6838 VBOXSTRICTRC rcStrict;
6839 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6840 if (fSupportsNextRipSave)
6841 {
6842 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6843 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6844 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6845 rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr);
6846 }
6847 else
6848 {
6849 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6850 rcStrict = IEMExecOne(pVCpu);
6851 }
6852
6853 if (rcStrict == VINF_IEM_RAISED_XCPT)
6854 {
6855 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6856 rcStrict = VINF_SUCCESS;
6857 }
6858 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6859 return rcStrict;
6860}
6861
6862
6863/**
6864 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
6865 */
6866HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6867{
6868 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6869
6870 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
6871 VBOXSTRICTRC rcStrict;
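    /* Record the exit in the EM history; a non-NULL exit record means this is a frequent exit (or one needing probing) and is handed to EMHistoryExec below. */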
6872 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
6873 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
6874 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
6875 if (!pExitRec)
6876 {
6877 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6878 if (fSupportsNextRipSave)
6879 {
6880 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6881 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6882 rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
6883 }
6884 else
6885 {
6886 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6887 rcStrict = IEMExecOne(pVCpu);
6888 }
6889
6890 if (rcStrict == VINF_IEM_RAISED_XCPT)
6891 {
6892 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
6893 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6894 rcStrict = VINF_SUCCESS;
6895 }
6896 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6897 }
6898 else
6899 {
6900 /*
6901 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
6902 */
6903 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6904
6905 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
6906 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
6907
6908 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
6909
6910 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
6911 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6912 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6913 }
6914 return rcStrict;
6915}
6916
6917
6918/**
6919 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
6920 */
6921HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6922{
6923 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6924
6925 VBOXSTRICTRC rcStrict;
6926 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6927 if (fSupportsNextRipSave)
6928 {
6929 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
6930 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6931 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6932 rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
6933 }
6934 else
6935 {
6936 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6937 rcStrict = IEMExecOne(pVCpu);
6938 }
6939
6940 if (rcStrict == VINF_SUCCESS)
6941 pSvmTransient->fUpdateTscOffsetting = true;
6942 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6943 {
6944 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6945 rcStrict = VINF_SUCCESS;
6946 }
6947 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6948 return rcStrict;
6949}
6950
6951
6952/**
6953 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
6954 */
6955HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6956{
6957 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6958
6959 VBOXSTRICTRC rcStrict;
6960 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6961 if (fSupportsNextRipSave)
6962 {
6963 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
6964 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6965 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6966 rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr);
6967 }
6968 else
6969 {
6970 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6971 rcStrict = IEMExecOne(pVCpu);
6972 }
6973
6974 if (rcStrict == VINF_SUCCESS)
6975 pSvmTransient->fUpdateTscOffsetting = true;
6976 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6977 {
6978 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6979 rcStrict = VINF_SUCCESS;
6980 }
6981 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6982 return rcStrict;
6983}
6984
6985
6986/**
6987 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
6988 */
6989HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6990{
6991 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6992
6993 VBOXSTRICTRC rcStrict;
6994 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6995 if (fSupportsNextRipSave)
6996 {
6997 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
6998 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6999 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7000 rcStrict = IEMExecDecodedRdpmc(pVCpu, cbInstr);
7001 }
7002 else
7003 {
7004 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7005 rcStrict = IEMExecOne(pVCpu);
7006 }
7007
7008 if (rcStrict == VINF_IEM_RAISED_XCPT)
7009 {
7010 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7011 rcStrict = VINF_SUCCESS;
7012 }
7013 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7014 return rcStrict;
7015}
7016
7017
7018/**
7019 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
7020 */
7021HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7022{
7023 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7024 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
7025
7026 VBOXSTRICTRC rcStrict;
7027 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7028 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7029 if ( fSupportsDecodeAssists
7030 && fSupportsNextRipSave)
7031 {
7032 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7033 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7034 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
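        /* With decode assists, EXITINFO1 holds the linear address operand of the INVLPG instruction. */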
7035 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
7036 rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
7037 }
7038 else
7039 {
7040 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7041 rcStrict = IEMExecOne(pVCpu);
7042 }
7043
7044 if (rcStrict == VINF_IEM_RAISED_XCPT)
7045 {
7046 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7047 rcStrict = VINF_SUCCESS;
7048 }
7049 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7050 return VBOXSTRICTRC_VAL(rcStrict);
7051}
7052
7053
7054/**
7055 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
7056 */
7057HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7058{
7059 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7060
7061 VBOXSTRICTRC rcStrict;
7062 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7063 if (fSupportsNextRipSave)
7064 {
7065 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7066 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7067 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7068 rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
7069 }
7070 else
7071 {
7072 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7073 rcStrict = IEMExecOne(pVCpu);
7074 }
7075
7076 if ( rcStrict == VINF_EM_HALT
7077 || rcStrict == VINF_SUCCESS)
7078 rcStrict = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT;
7079 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7080 {
7081 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7082 rcStrict = VINF_SUCCESS;
7083 }
7084 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7085 if (rcStrict != VINF_SUCCESS)
7086 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
7087 return VBOXSTRICTRC_VAL(rcStrict);
7088}
7089
7090
7091/**
7092 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
7093 */
7094HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7095{
7096 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7097
7098 /*
7099 * If the instruction length supplied by the CPU is 3 bytes, we can be certain that no
7100 * segment override prefix is present (and thus use the default segment DS). Otherwise, a
7101 * segment override prefix or other prefixes might be used, in which case we fall back to
7102 * IEMExecOne() to figure it out.
7103 */
7104 VBOXSTRICTRC rcStrict;
7105 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7106 uint8_t const cbInstr = hmR0SvmSupportsNextRipSave(pVCpu) ? pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip : 0;
7107 if (cbInstr)
7108 {
7109 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7110 rcStrict = IEMExecDecodedMonitor(pVCpu, cbInstr);
7111 }
7112 else
7113 {
7114 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7115 rcStrict = IEMExecOne(pVCpu);
7116 }
7117
7118 if (rcStrict == VINF_IEM_RAISED_XCPT)
7119 {
7120 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7121 rcStrict = VINF_SUCCESS;
7122 }
7123 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7124 return rcStrict;
7125}
7126
7127
7128/**
7129 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
7130 */
7131HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7132{
7133 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7134
7135 VBOXSTRICTRC rcStrict;
7136 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7137 if (fSupportsNextRipSave)
7138 {
7139 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7140 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7141 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7142 rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr);
7143 }
7144 else
7145 {
7146 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7147 rcStrict = IEMExecOne(pVCpu);
7148 }
7149
7150 if ( rcStrict == VINF_EM_HALT
7151 && EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7152 rcStrict = VINF_SUCCESS;
7153 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7154 {
7155 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7156 rcStrict = VINF_SUCCESS;
7157 }
7158 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7159 return rcStrict;
7160}
7161
7162
7163/**
7164 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
7165 * \#VMEXIT.
7166 */
7167HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7168{
7169 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7170 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7171 return VINF_EM_RESET;
7172}
7173
7174
7175/**
7176 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
7177 */
7178HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7179{
7180 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7181 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7182 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
7183 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
7184 RT_NOREF(pVmcb);
7185 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7186 return VERR_SVM_UNEXPECTED_EXIT;
7187}
7188
7189
7190/**
7191 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
7192 */
7193HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7194{
7195 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7196
7197 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7198 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7199#ifdef VBOX_WITH_STATISTICS
7200 switch (pSvmTransient->u64ExitCode)
7201 {
7202 case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
7203 case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
7204 case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
7205 case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
7206 case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
7207 }
7208#endif
7209
7210 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7211 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7212 if ( fSupportsDecodeAssists
7213 && fSupportsNextRipSave)
7214 {
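    /* With decode assists the CPU reports in EXITINFO1 whether this was a MOV from CRx
       (as opposed to SMSW) and which GPR was the destination, so no instruction decoding
       is needed for that case. */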
7215 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7216 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7217 if (fMovCRx)
7218 {
7219 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR_MASK
7220 | CPUMCTX_EXTRN_APIC_TPR);
7221 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
7222 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
7223 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7224 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
7225 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7226 return VBOXSTRICTRC_VAL(rcStrict);
7227 }
7228 /* else: SMSW instruction, fall back below to IEM for this. */
7229 }
7230
7231 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7232 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7233 AssertMsg( rcStrict == VINF_SUCCESS
7234 || rcStrict == VINF_PGM_SYNC_CR3
7235 || rcStrict == VINF_IEM_RAISED_XCPT,
7236 ("hmR0SvmExitReadCRx: IEMExecOne failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7237 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
7238 if (rcStrict == VINF_IEM_RAISED_XCPT)
7239 {
7240 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7241 rcStrict = VINF_SUCCESS;
7242 }
7243 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7244 return rcStrict;
7245}
7246
7247
7248/**
7249 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
7250 */
7251HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7252{
7253 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7254
7255 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
7256 uint8_t const iCrReg = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
7257 Assert(iCrReg <= 15);
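    /* Note: SVM_EXIT_CR0_SEL_WRITE is the selective CR0-write intercept, which per the AMD APM
       only fires for writes that change CR0 bits other than TS and MP; it is treated here
       simply as a CR0 write. */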
7258
7259 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
7260 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7261 bool fDecodedInstr = false;
7262 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7263 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7264 if ( fSupportsDecodeAssists
7265 && fSupportsNextRipSave)
7266 {
7267 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7268 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7269 if (fMovCRx)
7270 {
7271 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
7272 | CPUMCTX_EXTRN_APIC_TPR);
7273 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
7274 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7275 Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
7276 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
7277 fDecodedInstr = true;
7278 }
7279 /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
7280 }
7281
7282 if (!fDecodedInstr)
7283 {
7284 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7285 Log4Func(("iCrReg=%#x\n", iCrReg));
7286 rcStrict = IEMExecOne(pVCpu);
7287 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
7288 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
7289 rcStrict = VERR_EM_INTERPRETER;
7290 }
7291
7292 if (rcStrict == VINF_SUCCESS)
7293 {
7294 switch (iCrReg)
7295 {
7296 case 0:
7297 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
7298 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
7299 break;
7300
7301 case 2:
7302 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
7303 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
7304 break;
7305
7306 case 3:
7307 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
7308 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
7309 break;
7310
7311 case 4:
7312 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
7313 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
7314 break;
7315
7316 case 8:
7317 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7318 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
7319 break;
7320
7321 default:
7322 {
7323 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
7324 pSvmTransient->u64ExitCode, iCrReg));
7325 break;
7326 }
7327 }
7328 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7329 }
7330 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7331 {
7332 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7333 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7334 rcStrict = VINF_SUCCESS;
7335 }
7336 else
7337 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_SYNC_CR3);
7338 return rcStrict;
7339}
7340
7341
7342/**
7343 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
7344 *
7345 * @returns Strict VBox status code.
7346 * @param pVCpu The cross context virtual CPU structure.
7347 * @param pVmcb Pointer to the VM control block.
7348 */
7349static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
7350{
7351 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7352 Log4Func(("idMsr=%#RX32\n", pVCpu->cpum.GstCtx.ecx));
7353
7354 VBOXSTRICTRC rcStrict;
7355 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7356 if (fSupportsNextRipSave)
7357 {
7358 /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
7359 * can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
7360 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7361 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7362 rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
7363 }
7364 else
7365 {
7366 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7367 rcStrict = IEMExecOne(pVCpu);
7368 }
7369
7370 AssertMsg( rcStrict == VINF_SUCCESS
7371 || rcStrict == VINF_IEM_RAISED_XCPT
7372 || rcStrict == VINF_CPUM_R3_MSR_READ,
7373 ("hmR0SvmExitReadMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7374
7375 if (rcStrict == VINF_IEM_RAISED_XCPT)
7376 {
7377 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7378 rcStrict = VINF_SUCCESS;
7379 }
7380 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7381 return rcStrict;
7382}
7383
7384
7385/**
7386 * \#VMEXIT helper for MSR writes, see hmR0SvmExitMsr.
7387 *
7388 * @returns Strict VBox status code.
7389 * @param pVCpu The cross context virtual CPU structure.
7390 * @param pVmcb Pointer to the VM control block.
7391 * @param pSvmTransient Pointer to the SVM-transient structure.
7392 */
7393static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
7394{
7395 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7396 uint32_t const idMsr = pCtx->ecx;
7397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7398 Log4Func(("idMsr=%#RX32\n", idMsr));
7399
7400 /*
7401 * Handle TPR patching MSR writes.
7402 * We utilize the LSTAR MSR for patching.
7403 */
7404 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7405 if ( idMsr == MSR_K8_LSTAR
7406 && pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingActive)
7407 {
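        /* The 32-bit TPR patch turns guest TPR accesses into LSTAR writes (LSTAR being unused
           by 32-bit guests), so the low 8 bits of EAX carry the new TPR value here. */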
7408 unsigned cbInstr;
7409 if (fSupportsNextRipSave)
7410 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7411 else
7412 {
7413 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
7414 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
7415 if ( rc == VINF_SUCCESS
7416 && pDis->pCurInstr->uOpcode == OP_WRMSR)
7417 Assert(cbInstr > 0);
7418 else
7419 cbInstr = 0;
7420 }
7421
7422 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
7423 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
7424 {
7425 int rc = APICSetTpr(pVCpu, pCtx->eax & 0xff);
7426 AssertRCReturn(rc, rc);
7427 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7428 }
7429
7430 int rc = VINF_SUCCESS;
7431 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7432 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7433 return rc;
7434 }
7435
7436 /*
7437 * Handle regular MSR writes.
7438 */
7439 VBOXSTRICTRC rcStrict;
7440 if (fSupportsNextRipSave)
7441 {
7442 /** @todo Optimize this: We don't need to get much of the MSR state here
7443 * since we're only updating. CPUMAllMsrs.cpp can ask for what it needs and
7444 * clear the applicable extern flags. */
7445 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7446 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7447 rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
7448 }
7449 else
7450 {
7451 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7452 rcStrict = IEMExecOne(pVCpu);
7453 }
7454
7455 AssertMsg( rcStrict == VINF_SUCCESS
7456 || rcStrict == VINF_IEM_RAISED_XCPT
7457 || rcStrict == VINF_CPUM_R3_MSR_WRITE,
7458 ("hmR0SvmExitWriteMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7459
7460 if (rcStrict == VINF_SUCCESS)
7461 {
7462 /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
7463 if ( idMsr >= MSR_IA32_X2APIC_START
7464 && idMsr <= MSR_IA32_X2APIC_END)
7465 {
7466 /*
7467 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest().
7468 * When full APIC register virtualization is implemented we'll have to make sure
7469 * APIC state is saved from the VMCB before IEM changes it.
7470 */
7471 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7472 }
7473 else
7474 {
7475 switch (idMsr)
7476 {
7477 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
7478 case MSR_K6_EFER: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR); break;
7479 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
7480 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
7481 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7482 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7483 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7484 }
7485 }
7486 }
7487 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7488 {
7489 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7490 rcStrict = VINF_SUCCESS;
7491 }
7492 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7493 return rcStrict;
7494}
7495
7496
7497/**
7498 * \#VMEXIT handler for MSR reads and writes (SVM_EXIT_MSR). Conditional
7499 * \#VMEXIT.
7500 */
7501HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7502{
7503 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7504
7505 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
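    /* EXITINFO1 gives the direction of the MSR access: 0 for RDMSR, 1 for WRMSR. */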
7506 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
7507 return hmR0SvmExitReadMsr(pVCpu, pVmcb);
7508
7509 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
7510 return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
7511}
7512
7513
7514/**
7515 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
7516 */
7517HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7518{
7519 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7520 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7521
7522 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
7523
7524 /** @todo Stepping with nested-guest. */
7525 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7526 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
7527 {
7528 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
7529 if (pSvmTransient->fWasGuestDebugStateActive)
7530 {
7531 AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
7532 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7533 return VERR_SVM_UNEXPECTED_EXIT;
7534 }
7535
7536 /*
7537 * Lazy DR0-3 loading.
7538 */
7539 if (!pSvmTransient->fWasHyperDebugStateActive)
7540 {
7541 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
7542 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
7543
7544 /* Don't intercept DRx read and writes. */
7545 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
7546 pVmcb->ctrl.u16InterceptRdDRx = 0;
7547 pVmcb->ctrl.u16InterceptWrDRx = 0;
7548 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
7549
7550 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7551 VMMRZCallRing3Disable(pVCpu);
7552 HM_DISABLE_PREEMPT(pVCpu);
7553
7554 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
7555 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
7556 Assert(CPUMIsGuestDebugStateActive(pVCpu));
7557
7558 HM_RESTORE_PREEMPT();
7559 VMMRZCallRing3Enable(pVCpu);
7560
7561 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
7562 return VINF_SUCCESS;
7563 }
7564 }
7565
7566 /*
7567 * Interpret the read/writing of DRx.
7568 */
7569 /** @todo Decode assist. */
7570 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu);
7571 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
7572 if (RT_LIKELY(rc == VINF_SUCCESS))
7573 {
7574 /* Not necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assists. */
7575 /** @todo CPUM should set this flag! */
7576 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
7577 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7578 }
7579 else
7580 Assert(rc == VERR_EM_INTERPRETER);
7581 return rc;
7582}
7583
7584
7585/**
7586 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
7587 */
7588HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7589{
7590 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7591 /* For now it's the same since we interpret the instruction anyway. Will change when use of decode assists is implemented. */
7592 VBOXSTRICTRC rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
7593 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
7594 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
7595 return rc;
7596}
7597
7598
7599/**
7600 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
7601 */
7602HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7603{
7604 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7605 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7606
7607 /** @todo decode assists... */
7608 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7609 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7610 {
7611 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7612 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7613 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
7614 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7615 {
7616 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7617 hmR0SvmUpdateVmRunFunction(pVCpu);
7618 }
7619 }
7620 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7621 {
7622 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7623 rcStrict = VINF_SUCCESS;
7624 }
7625 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7626 return rcStrict;
7627}
7628
7629
7630/**
7631 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
7632 */
7633HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7634{
7635 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7636 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);
7637
7638 /* I/O operation lookup arrays. */
7639 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
7640 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
7641 the result (in AL/AX/EAX). */
7642 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7643 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7644 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7645
7646 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7647
7648 /* See AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2 "EXITINFO1 for IOIO Intercept" for the format. */
7649 SVMIOIOEXITINFO IoExitInfo;
7650 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
7651 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
7652 uint32_t cbValue = s_aIOSize[uIOWidth];
7653 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
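    /* The operand-size (bits 6:4) and address-size (bits 9:7) fields of EXITINFO1 are one-hot
       encoded, which is why the lookup tables above only have entries at indices 1, 2 and 4. */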
7654
7655 if (RT_UNLIKELY(!cbValue))
7656 {
7657 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
7658 return VERR_EM_INTERPRETER;
7659 }
7660
7661 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
7662 VBOXSTRICTRC rcStrict;
7663 PCEMEXITREC pExitRec = NULL;
7664 if ( !pVCpu->hm.s.fSingleInstruction
7665 && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
7666 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7667 !IoExitInfo.n.u1Str
7668 ? IoExitInfo.n.u1Type == SVM_IOIO_READ
7669 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
7670 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
7671 : IoExitInfo.n.u1Type == SVM_IOIO_READ
7672 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
7673 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
7674 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7675 if (!pExitRec)
7676 {
7677 bool fUpdateRipAlready = false;
7678 if (IoExitInfo.n.u1Str)
7679 {
7680 /* INS/OUTS - I/O String instruction. */
7681 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
7682 * in EXITINFO1? Investigate once this thing is up and running. */
7683 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
7684 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
7685 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
7686 static IEMMODE const s_aenmAddrMode[8] =
7687 {
7688 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
7689 };
7690 IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
7691 if (enmAddrMode != (IEMMODE)-1)
7692 {
7693 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7694 if (cbInstr <= 15 && cbInstr >= 1)
7695 {
7696 Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
7697 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7698 {
7699 /* Don't know exactly how to detect whether u3Seg is valid, currently
7700 only enabling it for Bulldozer and later with NRIP. OS/2 broke on
7701 2384 Opterons when only checking NRIP. */
7702 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7703 if ( fSupportsNextRipSave
7704 && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
7705 {
7706 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
7707 ("u32Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
7708 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7709 IoExitInfo.n.u3Seg, true /*fIoChecked*/);
7710 }
7711 else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
7712 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7713 X86_SREG_DS, true /*fIoChecked*/);
7714 else
7715 rcStrict = IEMExecOne(pVCpu);
7716 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7717 }
7718 else
7719 {
7720 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
7721 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7722 true /*fIoChecked*/);
7723 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7724 }
7725 }
7726 else
7727 {
7728 AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
7729 rcStrict = IEMExecOne(pVCpu);
7730 }
7731 }
7732 else
7733 {
7734 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
7735 rcStrict = IEMExecOne(pVCpu);
7736 }
7737 fUpdateRipAlready = true;
7738 if (rcStrict == VINF_IEM_RAISED_XCPT)
7739 {
7740 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7741 rcStrict = VINF_SUCCESS;
7742 }
7743 }
7744 else
7745 {
7746 /* IN/OUT - I/O instruction. */
7747 Assert(!IoExitInfo.n.u1Rep);
7748
7749 uint8_t const cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7750 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7751 {
7752 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
7753 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7754 && !pCtx->eflags.Bits.u1TF)
7755 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue, pCtx->eax & uAndVal);
7756 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
7757 }
7758 else
7759 {
7760 uint32_t u32Val = 0;
7761 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
7762 if (IOM_SUCCESS(rcStrict))
7763 {
7764 /* Save result of I/O IN instr. in AL/AX/EAX. */
7765 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
7766 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
7767 }
7768 else if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7769 && !pCtx->eflags.Bits.u1TF)
7770 rcStrict = EMRZSetPendingIoPortRead(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue);
7771
7772 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
7773 }
7774 }
7775
7776 if (IOM_SUCCESS(rcStrict))
7777 {
7778 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
7779 if (!fUpdateRipAlready)
7780 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
7781
7782 /*
7783 * If any I/O breakpoints are armed, we need to check if one triggered
7784 * and take appropriate action.
7785 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
7786 */
7787 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
7788 * execution engines about whether hyper BPs and such are pending. */
7789 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
7790 uint32_t const uDr7 = pCtx->dr[7];
7791 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
7792 && X86_DR7_ANY_RW_IO(uDr7)
7793 && (pCtx->cr4 & X86_CR4_DE))
7794 || DBGFBpIsHwIoArmed(pVM)))
7795 {
7796 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7797 VMMRZCallRing3Disable(pVCpu);
7798 HM_DISABLE_PREEMPT(pVCpu);
7799
7800 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
7801 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
7802
7803 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
7804 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
7805 {
7806 /* Raise #DB. */
7807 pVmcb->guest.u64DR6 = pCtx->dr[6];
7808 pVmcb->guest.u64DR7 = pCtx->dr[7];
7809 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
7810 hmR0SvmSetPendingXcptDB(pVCpu);
7811 }
7812 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
7813 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
7814 else if ( rcStrict2 != VINF_SUCCESS
7815 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
7816 rcStrict = rcStrict2;
7817 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
7818
7819 HM_RESTORE_PREEMPT();
7820 VMMRZCallRing3Enable(pVCpu);
7821 }
7822
7823 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7824 }
7825#ifdef VBOX_STRICT
7826 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7827 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
7828 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
7829 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7830 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
7831 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
7832 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
7833 else
7834 {
7835 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
7836 * statuses that the VMM device and some others may return. See
7837 * IOM_SUCCESS() for guidance. */
7838 AssertMsg( RT_FAILURE(rcStrict)
7839 || rcStrict == VINF_SUCCESS
7840 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
7841 || rcStrict == VINF_EM_DBG_BREAKPOINT
7842 || rcStrict == VINF_EM_RAW_GUEST_TRAP
7843 || rcStrict == VINF_EM_DBG_STEPPED
7844 || rcStrict == VINF_EM_RAW_TO_R3
7845 || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7846 }
7847#endif
7848 }
7849 else
7850 {
7851 /*
7852 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7853 */
7854 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7855 STAM_COUNTER_INC(!IoExitInfo.n.u1Str
7856 ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
7857 : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
7858 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
7859 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
7860 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));
7861
7862 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7863 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7864
7865 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7866 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7867 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7868 }
7869 return rcStrict;
7870}
7871
7872
7873/**
7874 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
7875 */
7876HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7877{
7878 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7879 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7880 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
7881
7882 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7883 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7884 Assert(pVM->hmr0.s.fNestedPaging);
7885
7886 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
7887 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7888 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
7889 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1; /* Note! High bits in EXITINFO1 may contain additional info and are
7890 thus intentionally not copied into u32ErrCode. */
7891
7892 Log4Func(("#NPF at CS:RIP=%04x:%RX64 GCPhysFaultAddr=%RGp ErrCode=%#x cbInstrFetched=%u %.15Rhxs\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr,
7893 u32ErrCode, pVmcb->ctrl.cbInstrFetched, pVmcb->ctrl.abInstr));
7894
7895 /*
7896 * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
7897 */
7898 if ( pVM->hm.s.fTprPatchingAllowed
7899 && (GCPhysFaultAddr & GUEST_PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
7900 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
7901 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
7902 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
7903 && !CPUMIsGuestInLongModeEx(pCtx)
7904 && !CPUMGetGuestCPL(pVCpu)
7905 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7906 {
7907 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7908 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7909
7910 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
7911 {
7912 /* Only attempt to patch the instruction once. */
7913 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7914 if (!pPatch)
7915 return VINF_EM_HM_PATCH_TPR_INSTR;
7916 }
7917 }
7918
7919 /*
7920 * Determine the nested paging mode.
7921 */
7922/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
7923 PGMMODE const enmNestedPagingMode = PGMGetHostMode(pVM);
7924
7925 /*
7926 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
7927 */
7928 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
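    /* PGM marks MMIO pages with a reserved bit, so an #NPF with both P and RSVD set identifies
       an MMIO access and can be handled by PGMR0Trap0eHandlerNPMisconfig() without a lookup. */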
7929 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
7930 {
7931 /*
7932 * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise
7933 * injecting the original pending event would most likely cause the same MMIO #NPF.
7934 */
7935 if (pVCpu->hm.s.Event.fPending)
7936 {
7937 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
7938 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7939 }
7940
7941 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
7942 VBOXSTRICTRC rcStrict;
7943 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7944 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
7945 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7946 if (!pExitRec)
7947 {
7948
7949 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, pCtx, GCPhysFaultAddr, u32ErrCode);
7950
7951 /*
7952 * If we succeed, resume guest execution.
7953 *
7954 * If we fail in interpreting the instruction because we couldn't get the guest
7955 * physical address of the page containing the instruction via the guest's page
7956 * tables (we would invalidate the guest page in the host TLB), resume execution
7957 * which would cause a guest page fault to let the guest handle this weird case.
7958 *
7959 * See @bugref{6043}.
7960 */
7961 if ( rcStrict == VINF_SUCCESS
7962 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
7963 || rcStrict == VERR_PAGE_NOT_PRESENT)
7964 {
7965 /* Successfully handled MMIO operation. */
7966 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7967 rcStrict = VINF_SUCCESS;
7968 }
7969 }
7970 else
7971 {
7972 /*
7973 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7974 */
7975 Assert(pCtx == &pVCpu->cpum.GstCtx);
7976 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7977 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
7978 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));
7979
7980 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7981 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7982
7983 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7984 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7985 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7986 }
7987 return rcStrict;
7988 }
7989
7990 /*
7991 * Nested page-fault.
7992 */
7993 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
7994 int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, pCtx, GCPhysFaultAddr);
7995 TRPMResetTrap(pVCpu);
7996
7997 Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
7998
7999 /*
8000 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
8001 */
8002 if ( rc == VINF_SUCCESS
8003 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8004 || rc == VERR_PAGE_NOT_PRESENT)
8005 {
8006 /* We've successfully synced our shadow page tables. */
8007 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8008 rc = VINF_SUCCESS;
8009 }
8010
8011 /*
8012 * If delivering an event causes an #NPF (and not MMIO), we shall resolve the fault and
8013 * re-inject the original event.
8014 */
8015 if (pVCpu->hm.s.Event.fPending)
8016 {
8017 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
8018
8019 /*
8020 * If the #NPF handler requested emulation of the instruction, ignore it.
8021 * We need to re-inject the original event so as to not lose it.
8022 * Reproducible when booting ReactOS 0.4.12 with BTRFS (installed using BootCD,
8023 * LiveCD is broken for other reasons).
8024 */
8025 if (rc == VINF_EM_RAW_EMULATE_INSTR)
8026 rc = VINF_EM_RAW_INJECT_TRPM_EVENT;
8027 }
8028
8029 return rc;
8030}
8031
8032
8033/**
8034 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
8035 * \#VMEXIT.
8036 */
8037HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8038{
8039 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8040 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8041
8042 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
8043 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8044 hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);
8045
8046 /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
8047 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8048 return VINF_SUCCESS;
8049}
8050
8051
8052/**
8053 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
8054 * \#VMEXIT.
8055 */
8056HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8057{
8058 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8059 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8060
8061#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
8062 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8063#endif
8064
8065 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8066 if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
8067 {
8068 /*
8069 * AMD-V provides us with the exception which caused the TS; we collect
8070 * the information in the call to hmR0SvmCheckExitDueToEventDelivery().
8071 */
8072 Log4Func(("TS occurred during event delivery\n"));
8073 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8074 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8075 }
8076
8077 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8078 * emulation. */
8079 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8080 return VERR_EM_INTERPRETER;
8081}
8082
8083
8084/**
8085 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
8086 */
8087HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8088{
8089 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8090 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8091
8092 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8093 if (pVM->hm.s.fTprPatchingAllowed)
8094 {
8095 int rc = hmEmulateSvmMovTpr(pVM, pVCpu);
8096 if (rc != VERR_NOT_FOUND)
8097 {
8098 Log4Func(("hmEmulateSvmMovTpr returns %Rrc\n", rc));
8099 return rc;
8100 }
8101 }
8102
8103 if (EMAreHypercallInstructionsEnabled(pVCpu))
8104 {
8105 unsigned cbInstr;
8106 if (hmR0SvmSupportsNextRipSave(pVCpu))
8107 {
8108 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8109 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8110 }
8111 else
8112 {
8113 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8114 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8115 if ( rc == VINF_SUCCESS
8116 && pDis->pCurInstr->uOpcode == OP_VMMCALL)
8117 Assert(cbInstr > 0);
8118 else
8119 cbInstr = 0;
8120 }
8121
8122 VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8123 if (RT_SUCCESS(rcStrict))
8124 {
8125 /* Only update the RIP if we're continuing guest execution and not in the case
8126 of say VINF_GIM_R3_HYPERCALL. */
8127 if (rcStrict == VINF_SUCCESS)
8128 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8129
8130 return VBOXSTRICTRC_VAL(rcStrict);
8131 }
8132 else
8133 Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));
8134 }
8135
8136 hmR0SvmSetPendingXcptUD(pVCpu);
8137 return VINF_SUCCESS;
8138}
8139
8140
8141/**
8142 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
8143 */
8144HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8145{
8146 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8147
8148 unsigned cbInstr;
8149 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8150 if (fSupportsNextRipSave)
8151 {
8152 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8153 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8154 }
8155 else
8156 {
8157 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8158 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8159 if ( rc == VINF_SUCCESS
8160 && pDis->pCurInstr->uOpcode == OP_PAUSE)
8161 Assert(cbInstr > 0);
8162 else
8163 cbInstr = 0;
8164 }
8165
8166 /** @todo The guest has likely hit a contended spinlock. We might want to
8167 * poke or schedule a different guest VCPU. */
8168 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8169 return VINF_EM_RAW_INTERRUPT;
8170}
8171
8172
8173/**
8174 * \#VMEXIT handler for FERR intercept (SVM_EXIT_FERR_FREEZE). Conditional
8175 * \#VMEXIT.
8176 */
8177HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8178{
8179 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8180 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
8181 Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));
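    /* This intercept only triggers while CR0.NE is clear, i.e. the guest relies on the legacy
       external FPU error reporting path (FERR# raising IRQ 13 through the PIC). */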
8182
8183 Log4Func(("Raising IRQ 13 in response to #FERR\n"));
8184 return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8185}
8186
8187
8188/**
8189 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
8190 */
8191HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8192{
8193 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8194
8195 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now (almost) ready. */
8196 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8197 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);
8198
8199 /* Emulate the IRET. We have to execute the IRET before an NMI, but must potentially
8200 * deliver a pending NMI right after. If the IRET faults, an NMI can come before the
8201 * handler executes. Yes, x86 is ugly.
8202 */
8203 return VINF_EM_RAW_EMULATE_INSTR;
8204}
8205
8206
8207/**
8208 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_XCPT_14).
8209 * Conditional \#VMEXIT.
8210 */
8211HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8212{
8213 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8214 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8215 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8216
8217 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
8218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8219 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8220 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8221 uint32_t uErrCode = pVmcb->ctrl.u64ExitInfo1;
8222 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
8223
8224#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
8225 if (pVM->hmr0.s.fNestedPaging)
8226 {
8227 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8228 if ( !pSvmTransient->fVectoringDoublePF
8229 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8230 {
8231 /* A genuine guest #PF, reflect it to the guest. */
8232 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8233 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
8234 uFaultAddress, uErrCode));
8235 }
8236 else
8237 {
8238 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8239 hmR0SvmSetPendingXcptDF(pVCpu);
8240 Log4Func(("Pending #DF due to vectoring #PF. NP\n"));
8241 }
8242 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8243 return VINF_SUCCESS;
8244 }
8245#endif
8246
8247 Assert(!pVM->hmr0.s.fNestedPaging);
8248
8249 /*
8250 * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
8251 */
8252 if ( pVM->hm.s.fTprPatchingAllowed
8253 && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
8254 && !(uErrCode & X86_TRAP_PF_P) /* Not present. */
8255 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8256 && !CPUMIsGuestInLongModeEx(pCtx)
8257 && !CPUMGetGuestCPL(pVCpu)
8258 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8259 {
8260 RTGCPHYS GCPhysApicBase;
8261 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
8262 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8263
8264 /* Check if the page at the fault-address is the APIC base. */
8265 PGMPTWALK Walk;
8266 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, &Walk);
8267 if ( rc2 == VINF_SUCCESS
8268 && Walk.GCPhys == GCPhysApicBase)
8269 {
8270 /* Only attempt to patch the instruction once. */
8271 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
8272 if (!pPatch)
8273 return VINF_EM_HM_PATCH_TPR_INSTR;
8274 }
8275 }
8276
8277 Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
8278 pCtx->rip, uErrCode, pCtx->cr3));
8279
8280 /*
8281 * If it's a vectoring #PF, emulate the original event injection as
8282 * PGMTrap0eHandler() is incapable of differentiating between instruction emulation and
8283 * event injection that caused a #PF. See @bugref{6607}.
8284 */
8285 if (pSvmTransient->fVectoringPF)
8286 {
8287 Assert(pVCpu->hm.s.Event.fPending);
8288 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8289 }
8290
8291 TRPMAssertXcptPF(pVCpu, uFaultAddress, uErrCode);
8292 int rc = PGMTrap0eHandler(pVCpu, uErrCode, pCtx, (RTGCPTR)uFaultAddress);
8293
8294 Log4Func(("#PF: rc=%Rrc\n", rc));
8295
8296 if (rc == VINF_SUCCESS)
8297 {
8298 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
8299 TRPMResetTrap(pVCpu);
8300 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8301 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8302 return rc;
8303 }
8304
8305 if (rc == VINF_EM_RAW_GUEST_TRAP)
8306 {
8307 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8308
8309 /*
8310 * If a nested-guest delivers a #PF and that causes a #PF which is -not- a shadow #PF,
8311 * we should simply forward the #PF to the guest and it is up to the nested-hypervisor to
8312 * determine whether it is a nested-shadow #PF or a #DF, see @bugref{7243#c121}.
8313 */
8314 if ( !pSvmTransient->fVectoringDoublePF
8315 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8316 {
8317 /* It's a guest (or nested-guest) page fault and needs to be reflected. */
8318 uErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
8319 TRPMResetTrap(pVCpu);
8320
8321#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8322 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
8323 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8324 && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
8325 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress);
8326#endif
8327
8328 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8329 }
8330 else
8331 {
8332 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8333 TRPMResetTrap(pVCpu);
8334 hmR0SvmSetPendingXcptDF(pVCpu);
8335 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
8336 }
8337
8338 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8339 return VINF_SUCCESS;
8340 }
8341
8342 TRPMResetTrap(pVCpu);
8343 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8344 return rc;
8345}
8346
8347
8348
8349/**
8350 * \#VMEXIT handler for divide error exceptions (SVM_EXIT_XCPT_0).
8351 * Conditional \#VMEXIT.
8352 */
8353HMSVM_EXIT_DECL hmR0SvmExitXcptDE(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8354{
8355 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8356 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8357 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8358
8359 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8360 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8361 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8362
8363 int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8364 if (pVCpu->hm.s.fGCMTrapXcptDE)
8365 {
8366 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8367 uint8_t cbInstr = 0;
8368 VBOXSTRICTRC rcStrict = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
8369 if (rcStrict == VINF_SUCCESS)
8370 rc = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
8371 else if (rcStrict == VERR_NOT_FOUND)
8372 rc = VERR_NOT_FOUND; /* Deliver the exception. */
8373 else
8374 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
8375 }
8376
8377 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
8378 if (RT_FAILURE(rc))
8379 {
8380 hmR0SvmSetPendingXcptDE(pVCpu);
8381 rc = VINF_SUCCESS;
8382 }
8383
8384 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8385 return rc;
8386}
8387
8388
8389/**
8390 * \#VMEXIT handler for undefined opcode (SVM_EXIT_XCPT_6).
8391 * Conditional \#VMEXIT.
8392 */
8393HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8394{
8395 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8396 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8398
8399 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8400 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
8401 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8402
8403 /** @todo if we accumulate more optional stuff here, we ought to combine the
8404 * reading of opcode bytes to avoid doing it more than once. */
8405
8406 VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8407 if (pVCpu->hm.s.fGIMTrapXcptUD)
8408 {
8409 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8410 uint8_t cbInstr = 0;
8411 rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
8412 if (rcStrict == VINF_SUCCESS)
8413 {
8414 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
8415 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8416 rcStrict = VINF_SUCCESS;
8417 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8418 }
8419 else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
8420 rcStrict = VINF_SUCCESS;
8421 else if (rcStrict == VINF_GIM_R3_HYPERCALL)
8422 rcStrict = VINF_GIM_R3_HYPERCALL;
8423 else
8424 {
8425 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
8426 rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8427 }
8428 }
8429
8430 if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
8431 {
8432 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
8433 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8434 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
8435 {
8436 /* Ideally, IEM should just handle all these special #UD situations, but
8437 we don't quite trust things to behave optimally when doing that. So,
8438 for now we'll restrict ourselves to a handful of possible sysenter and
8439 sysexit encodings that we filter right here. */
8440 uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
8441 uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
8442 uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
8443 uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
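            /* SYSENTER/SYSEXIT are two opcode bytes (0F 34 / 0F 35); the extra byte in the
               ring-0 case allows for the REX.W prefix of a 64-bit SYSEXIT. */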
8444 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
8445 if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
8446 {
8447 cbInstr = cbMin;
8448 int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
8449 AssertRCStmt(rc2, cbInstr = 0);
8450 }
8451 else
8452 memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
8453 if ( cbInstr == 0 /* read error */
8454 || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
8455 || ( uCpl == 0
8456 && ( ( cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
8457 || ( cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35 /* rex.w sysexit */
8458 && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
8459 {
8460 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
8461 | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
8462 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8463 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, abInstr, cbInstr);
8464 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
8465 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
8466 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
8467 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8468 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
8469 if (rcStrict == VINF_IEM_RAISED_XCPT)
8470 rcStrict = VINF_SUCCESS;
8471 return rcStrict;
8472 }
8473 Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8474 }
8475 else
8476 Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8477 }
8478
8479 /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
8480 if (RT_FAILURE(rcStrict))
8481 {
8482 hmR0SvmSetPendingXcptUD(pVCpu);
8483 rcStrict = VINF_SUCCESS;
8484 }
8485
8486 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8487 return rcStrict;
8488}
8489
8490
8491/**
8492 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_XCPT_16).
8493 * Conditional \#VMEXIT.
8494 */
8495HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8496{
8497 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8498 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8499 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8500
8501 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8502 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8503
8504 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8505 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8506
8507 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8508
8509 if (!(pCtx->cr0 & X86_CR0_NE))
8510 {
8511 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8512 unsigned cbInstr;
8513 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8514 if (RT_SUCCESS(rc))
8515 {
8516 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
8517 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8518 if (RT_SUCCESS(rc))
8519 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8520 }
8521 else
8522 Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
8523 return rc;
8524 }
8525
8526 hmR0SvmSetPendingXcptMF(pVCpu);
8527 return VINF_SUCCESS;
8528}
8529
8530
8531/**
8532 * \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1). Conditional
8533 * \#VMEXIT.
8534 */
8535HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8536{
8537 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8538 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8539 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8540 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8541
8542 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
8543 {
8544 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
8545 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8546 }
8547
8548 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8549
8550 /*
8551 * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data
8552 * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception
8553 * handler expects. See AMD spec. 15.12.2 "#DB (Debug)".
8554 */
8555 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8556 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
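    /* Let DBGF decide whether the #DB belongs to the guest (VINF_EM_RAW_GUEST_TRAP) or was
       caused by our own breakpoints/stepping, in which case its status is returned to ring-3. */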
8557 int rc = DBGFTrap01Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
8558 if (rc == VINF_EM_RAW_GUEST_TRAP)
8559 {
8560 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
8561 if (CPUMIsHyperDebugStateActive(pVCpu))
8562 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
8563
8564 /* Reflect the exception back to the guest. */
8565 hmR0SvmSetPendingXcptDB(pVCpu);
8566 rc = VINF_SUCCESS;
8567 }
8568
8569 /*
8570 * Update DR6.
8571 */
8572 if (CPUMIsHyperDebugStateActive(pVCpu))
8573 {
8574 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
8575 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
8576 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
8577 }
8578 else
8579 {
8580 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
8581 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
8582 }
8583
8584 return rc;
8585}
8586
8587
8588/**
8589 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_XCPT_17).
8590 * Conditional \#VMEXIT.
8591 */
8592HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8593{
8594 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8595 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8596 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
8597
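    /* Reflect the #AC straight back to the guest; the error code for #AC is always zero. */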
8598 SVMEVENT Event;
8599 Event.u = 0;
8600 Event.n.u1Valid = 1;
8601 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8602 Event.n.u8Vector = X86_XCPT_AC;
8603 Event.n.u1ErrorCodeValid = 1;
8604 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8605 return VINF_SUCCESS;
8606}
8607
8608
8609/**
8610 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
8611 * Conditional \#VMEXIT.
8612 */
8613HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8614{
8615 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8616 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8617 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8618 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8619
8620 VBOXSTRICTRC rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
8621 if (rc == VINF_EM_RAW_GUEST_TRAP)
8622 {
8623 SVMEVENT Event;
8624 Event.u = 0;
8625 Event.n.u1Valid = 1;
8626 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8627 Event.n.u8Vector = X86_XCPT_BP;
8628 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8629 rc = VINF_SUCCESS;
8630 }
8631
8632 Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
8633 return rc;
8634}
8635
8636
8637/**
8638 * Hacks its way around the lovely mesa driver's backdoor accesses.
8639 *
8640 * @sa hmR0VmxHandleMesaDrvGp
8641 */
8642static int hmR0SvmHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8643{
8644 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK);
8645 Log(("hmR0SvmHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n",
8646 pVmcb->guest.CS.u16Sel, pVmcb->guest.u64RIP, pCtx->rcx, pCtx->rbx));
8647 RT_NOREF(pCtx, pVmcb);
8648
8649 /* For now we'll just skip the instruction. */
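    /* The matched IN eAX,DX instruction is a single opcode byte (0xed), so advancing RIP by one is sufficient. */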
8650 hmR0SvmAdvanceRip(pVCpu, 1);
8651 return VINF_SUCCESS;
8652}
8653
8654
8655/**
8656 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
8657 * backdoor logging w/o checking what it is running inside.
8658 *
8659 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
8660 * backdoor port and magic numbers loaded in registers.
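 *
 * Roughly, the guest-side access being matched looks like this (sketch):
 * @code
 *      mov     edx, 05658h         ; VMware backdoor I/O port ('VX')
 *      mov     eax, 0564d5868h     ; backdoor magic ('VMXh')
 *      in      eax, dx             ; opcode 0xed, faults with #GP(0) when ring-3 I/O isn't permitted
 * @endcode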
8661 *
8662 * @returns true if it is, false if it isn't.
8663 * @sa hmR0VmxIsMesaDrvGp
8664 */
8665DECLINLINE(bool) hmR0SvmIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8666{
8667 /* Check magic and port. */
8668 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
8669 /*Log8(("hmR0SvmIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax, pCtx->rdx));*/
8670 if (pCtx->dx != UINT32_C(0x5658))
8671 return false;
8672 if ((pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax) != UINT32_C(0x564d5868))
8673 return false;
8674
8675 /* Check that it is #GP(0). */
8676 if (pVmcb->ctrl.u64ExitInfo1 != 0)
8677 return false;
8678
8679 /* Flat ring-3 CS. */
8680 /*Log8(("hmR0SvmIsMesaDrvGp: u8CPL=%d base=%RX64\n", pVmcb->guest.u8CPL, pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base));*/
8681 if (pVmcb->guest.u8CPL != 3)
8682 return false;
8683 if ((pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base) != 0)
8684 return false;
8685
8686 /* 0xed: IN eAX,dx */
8687 if (pVmcb->ctrl.cbInstrFetched < 1) /* unlikely, it turns out. */
8688 {
8689 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_GPRS_MASK
8690 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8691 uint8_t abInstr[1];
8692 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
8693 /*Log8(("hmR0SvmIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0])); */
8694 if (RT_FAILURE(rc))
8695 return false;
8696 if (abInstr[0] != 0xed)
8697 return false;
8698 }
8699 else
8700 {
8701 /*Log8(("hmR0SvmIsMesaDrvGp: %#x\n", pVmcb->ctrl.abInstr));*/
8702 if (pVmcb->ctrl.abInstr[0] != 0xed)
8703 return false;
8704 }
8705 return true;
8706}
8707
8708
8709/**
8710 * \#VMEXIT handler for general protection faults (SVM_EXIT_XCPT_13).
8711 * Conditional \#VMEXIT.
8712 */
8713HMSVM_EXIT_DECL hmR0SvmExitXcptGP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8714{
8715 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8716 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8717 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8718
8719 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8720 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8721
8722 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8723 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
8724 || !hmR0SvmIsMesaDrvGp(pVCpu, pCtx, pVmcb))
8725 {
8726 SVMEVENT Event;
8727 Event.u = 0;
8728 Event.n.u1Valid = 1;
8729 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8730 Event.n.u8Vector = X86_XCPT_GP;
8731 Event.n.u1ErrorCodeValid = 1;
8732 Event.n.u32ErrorCode = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
8733 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8734 return VINF_SUCCESS;
8735 }
8736 return hmR0SvmHandleMesaDrvGp(pVCpu, pCtx, pVmcb);
8737}
8738
8739
8740/**
8741 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
8742 */
8743HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8744{
8745 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8746 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8747
8748 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8749 uint8_t const uVector = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0;
8750 uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
8751 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8752 Assert(uVector <= X86_XCPT_LAST);
8753 Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode));
8754
8755 SVMEVENT Event;
8756 Event.u = 0;
8757 Event.n.u1Valid = 1;
8758 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8759 Event.n.u8Vector = uVector;
8760 switch (uVector)
8761 {
8762 /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
8763 case X86_XCPT_PF: AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
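        /* These exceptions deliver an error code (passed to us in EXITINFO1), so forward it: */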
8764 case X86_XCPT_DF:
8765 case X86_XCPT_TS:
8766 case X86_XCPT_NP:
8767 case X86_XCPT_SS:
8768 case X86_XCPT_GP:
8769 case X86_XCPT_AC:
8770 {
8771 Event.n.u1ErrorCodeValid = 1;
8772 Event.n.u32ErrorCode = uErrCode;
8773 break;
8774 }
8775 }
8776
8777#ifdef VBOX_WITH_STATISTICS
8778 switch (uVector)
8779 {
8780 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
8781 case X86_XCPT_DB: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); break;
8782 case X86_XCPT_BP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); break;
8783 case X86_XCPT_OF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
8784 case X86_XCPT_BR: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR); break;
8785 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
8786 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); break;
8787 case X86_XCPT_DF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF); break;
8788 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); break;
8789 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
8790 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
8791 case X86_XCPT_GP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); break;
8792 case X86_XCPT_PF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); break;
8793 case X86_XCPT_MF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); break;
8794 case X86_XCPT_AC: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC); break;
8795 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
8796 default:
8797 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8798 break;
8799 }
8800#endif
8801
8802 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8803 return VINF_SUCCESS;
8804}
8805
8806
8807/**
8808 * \#VMEXIT handler for software interrupt (INTn). Conditional \#VMEXIT (debug).
8809 */
8810HMSVM_EXIT_DECL hmR0SvmExitSwInt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8811{
8812 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8813 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8814
8815 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
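    /* The vector of the intercepted INTn is in EXITINFO1 (low 8 bits); re-queue it as a software interrupt event. */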
8816 SVMEVENT Event;
8817 Event.u = 0;
8818 Event.n.u1Valid = 1;
8819 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
8820 Event.n.u8Vector = pVmcb->ctrl.u64ExitInfo1 & 0xff;
8821 Log4Func(("uVector=%#x\n", Event.n.u8Vector));
8822 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8823 return VINF_SUCCESS;
8824}
8825
8826
8827/**
8828 * Generic exit handler that interprets the current instruction.
8829 *
8830 * Useful for exits that only get triggered by dtrace and the debugger.  The caller
8831 * does the exit logging, and this function does the rest.
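 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 * @param   fExtraImport    Additional CPUMCTX_EXTRN_XXX bits to import on top of
 *                          IEM_CPUMCTX_EXTRN_MUST_MASK before executing the instruction.
 * @param   fHmChanged      Additional HM_CHANGED_XXX flags to OR into fCtxChanged on
 *                          success (RIP and RFLAGS are always marked as changed).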
8832 */
8833static VBOXSTRICTRC hmR0SvmExitInterpretInstruction(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient,
8834 uint64_t fExtraImport, uint64_t fHmChanged)
8835{
8836#if 1
8837 RT_NOREF(pSvmTransient);
8838 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fExtraImport);
8839 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
8840 if (rcStrict == VINF_SUCCESS)
8841 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RIP);
8842 else
8843 {
8844 Log4Func(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
8845 if (rcStrict == VINF_IEM_RAISED_XCPT)
8846 {
8847 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK | fHmChanged);
8848 rcStrict = VINF_SUCCESS;
8849 }
8850 else
8851 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged);
8852 }
8853 return rcStrict;
8854#else
8855 RT_NOREF(pVCpu, pSvmTransient, fExtraImport, fHmChanged);
8856 return VINF_EM_RAW_EMULATE_INSTR;
8857#endif
8858}
8859
8860
8861/**
8862 * \#VMEXIT handler for STR. Conditional \#VMEXIT (debug).
8863 */
8864HMSVM_EXIT_DECL hmR0SvmExitTrRead(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8865{
8866 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8867 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8868 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR, 0);
8869}
8870
8871
8872/**
8873 * \#VMEXIT handler for LTR. Conditional \#VMEXIT (OS/2 TLB workaround, debug).
8874 */
8875HMSVM_EXIT_DECL hmR0SvmExitTrWrite(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8876{
8877 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8878
8879 /* Workaround for lack of TLB flushing in OS/2 when returning to protected
8880 mode after a real mode call (like a BIOS call). See ticketref:20625
8881 comment 14. */
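    /* fMissingOS2TlbFlushWorkaround is a per-VM setting; when it is set, any LTR executed
       by the guest forces a full TLB flush before we resume guest execution. */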
8882 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8883 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
8884 {
8885 Log4Func(("%04x:%08RX64 TLB flush\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8886 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
8887 }
8888 else
8889 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8890
8891 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR, HM_CHANGED_GUEST_TR);
8892}
8893
8894
8895#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8896/**
8897 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
8898 */
8899HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8900{
8901 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8902
8903 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8904 Assert(pVmcb);
8905 Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
8906
8907 VBOXSTRICTRC rcStrict;
8908 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8909 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
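    /* When the CPU supports NRIP-save, the VMCB supplies the next RIP and thus the exact
       instruction length, letting us use the cheaper decoded-instruction IEM path; otherwise
       we fall back to having IEM fetch and decode the instruction itself. */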
8910 if (fSupportsNextRipSave)
8911 {
8912 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8913 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8914 rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
8915 }
8916 else
8917 {
8918 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8919 rcStrict = IEMExecOne(pVCpu);
8920 }
8921
8922 if (rcStrict == VINF_SUCCESS)
8923 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8924 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8925 {
8926 rcStrict = VINF_SUCCESS;
8927 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8928 }
8929 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8930 return rcStrict;
8931}
8932
8933
8934/**
8935 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
8936 */
8937HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8938{
8939 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8940
8941 /*
8942 * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
8943 * we only intercept STGI when events are pending for GIF to become 1.
8944 */
8945 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8946 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
8947 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI);
8948
8949 VBOXSTRICTRC rcStrict;
8950 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8951 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
8952 if (fSupportsNextRipSave)
8953 {
8954 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8955 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8956 rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
8957 }
8958 else
8959 {
8960 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8961 rcStrict = IEMExecOne(pVCpu);
8962 }
8963
8964 if (rcStrict == VINF_SUCCESS)
8965 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8966 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8967 {
8968 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8969 rcStrict = VINF_SUCCESS;
8970 }
8971 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8972 return rcStrict;
8973}
8974
8975
8976/**
8977 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
8978 */
8979HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8980{
8981 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8982
8983 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8984 Assert(pVmcb);
8985 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
8986
8987 VBOXSTRICTRC rcStrict;
8988 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8989 uint64_t const fImport = CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_KERNEL_GS_BASE
8990 | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_SYSCALL_MSRS
8991 | CPUMCTX_EXTRN_SYSENTER_MSRS;
8992 if (fSupportsNextRipSave)
8993 {
8994 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8995 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8996 rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
8997 }
8998 else
8999 {
9000 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
9001 rcStrict = IEMExecOne(pVCpu);
9002 }
9003
9004 if (rcStrict == VINF_SUCCESS)
9005 {
9006 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS | HM_CHANGED_GUEST_GS
9007 | HM_CHANGED_GUEST_TR | HM_CHANGED_GUEST_LDTR
9008 | HM_CHANGED_GUEST_KERNEL_GS_BASE | HM_CHANGED_GUEST_SYSCALL_MSRS
9009 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
9010 }
9011 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9012 {
9013 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9014 rcStrict = VINF_SUCCESS;
9015 }
9016 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9017 return rcStrict;
9018}
9019
9020
9021/**
9022 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
9023 */
9024HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9025{
9026 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9027
9028 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9029 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
9030
9031 VBOXSTRICTRC rcStrict;
9032 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9033 if (fSupportsNextRipSave)
9034 {
9035 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9036 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9037 rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
9038 }
9039 else
9040 {
9041 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9042 rcStrict = IEMExecOne(pVCpu);
9043 }
9044
9045 if (rcStrict == VINF_IEM_RAISED_XCPT)
9046 {
9047 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9048 rcStrict = VINF_SUCCESS;
9049 }
9050 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9051 return rcStrict;
9052}
9053
9054
9055/**
9056 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
9057 */
9058HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9059{
9060 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9061
9062 VBOXSTRICTRC rcStrict;
9063 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9064 if (fSupportsNextRipSave)
9065 {
9066 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9067 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9068 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9069 rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
9070 }
9071 else
9072 {
9073 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9074 rcStrict = IEMExecOne(pVCpu);
9075 }
9076
9077 if (rcStrict == VINF_IEM_RAISED_XCPT)
9078 {
9079 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9080 rcStrict = VINF_SUCCESS;
9081 }
9082 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9083 return rcStrict;
9084}
9085
9086
9087/**
9088 * \#VMEXIT handler for VMRUN (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
9089 */
9090HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9091{
9092 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9093    /* We shall import the entire state here, just in case we enter and continue executing
9094       the nested-guest with hardware-assisted SVM in ring-0; we would be switching VMCBs and
9095       could lose part of the CPU state. */
9096 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
9097
9098 VBOXSTRICTRC rcStrict;
9099 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9100 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
9101 if (fSupportsNextRipSave)
9102 {
9103 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9104 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9105 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
9106 }
9107 else
9108 {
9109        /* We use IEMExecOneBypassEx() here as it suppresses attempts to continue emulating any
9110           instruction(s) when interrupt inhibition is set as part of emulating the VMRUN
9111           instruction itself, see @bugref{7243#c126}. */
9112 rcStrict = IEMExecOneBypassEx(pVCpu, NULL /* pcbWritten */);
9113 }
9114 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
9115
9116 if (rcStrict == VINF_SUCCESS)
9117 {
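        /* VINF_SVM_VMRUN signals the ring-0 run loop to switch over to executing the nested-guest. */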
9118 rcStrict = VINF_SVM_VMRUN;
9119 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK);
9120 }
9121 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9122 {
9123 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9124 rcStrict = VINF_SUCCESS;
9125 }
9126 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9127 return rcStrict;
9128}
9129
9130
9131/**
9132 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1).
9133 * Unconditional \#VMEXIT.
9134 */
9135HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9136{
9137 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9138 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9139
9140 if (pVCpu->hm.s.Event.fPending)
9141 {
9142 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
9143 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9144 }
9145
9146 hmR0SvmSetPendingXcptDB(pVCpu);
9147 return VINF_SUCCESS;
9148}
9149
9150
9151/**
9152 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
9153 * Conditional \#VMEXIT.
9154 */
9155HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9156{
9157 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9158 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9159
9160 SVMEVENT Event;
9161 Event.u = 0;
9162 Event.n.u1Valid = 1;
9163 Event.n.u3Type = SVM_EVENT_EXCEPTION;
9164 Event.n.u8Vector = X86_XCPT_BP;
9165 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
9166 return VINF_SUCCESS;
9167}
9168#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
9169
9170/** @} */
9171