VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@105589

Last change on this file since 105589 was 104516, checked in by vboxsync, 7 months ago

VMM/GCM,IEM,HM: Integrate GCM with IEM, extending it to cover the mesa drv situation and valid ring-0 IN instructions to same port. Untested. TODO: NEM. bugref:9735 bugref:10683

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 380.7 KB
1/* $Id: HMSVMR0.cpp 104516 2024-05-04 01:53:42Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_HM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <iprt/asm-amd64-x86.h>
35#include <iprt/thread.h>
36
37#include <VBox/vmm/pdmapi.h>
38#include <VBox/vmm/dbgf.h>
39#include <VBox/vmm/iem.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/tm.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/gcm.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/vmm/apic.h>
46#include "HMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include <VBox/err.h>
49#include "HMSVMR0.h"
50#include "dtrace/VBoxVMM.h"
51
52#ifdef DEBUG_ramshankar
53# define HMSVM_SYNC_FULL_GUEST_STATE
54# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
55# define HMSVM_ALWAYS_TRAP_PF
56# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
57#endif
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63#ifdef VBOX_WITH_STATISTICS
64# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
65 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
66 if ((u64ExitCode) == SVM_EXIT_NPF) \
67 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
68 else \
69 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
70 } while (0)
71
72# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
73 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebugExitAll); \
74 if ((u64ExitCode) == SVM_EXIT_NPF) \
75 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
76 else \
77 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
78 } while (0)
79
80# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
81 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll); \
82 if ((u64ExitCode) == SVM_EXIT_NPF) \
83 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitReasonNpf); \
84 else \
85 STAM_COUNTER_INC(&pVCpu->hm.s.aStatNestedExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
86 } while (0)
87#else
88# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
89# define HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
90# define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
91#endif /* !VBOX_WITH_STATISTICS */
92
93/** If we decide to use a function table approach this can be useful to
94 * switch to a "static DECLCALLBACK(int)". */
95#define HMSVM_EXIT_DECL static VBOXSTRICTRC
96
97/**
98 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
99 * guest using hardware-assisted SVM.
100 *
101 * This excludes state like TSC AUX, GPRs (other than RSP, RAX) which are always
102 * swapped and restored across the world-switch, and also registers like
103 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
104 * \#VMEXIT.
105 */
106#define HMSVM_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
107 | CPUMCTX_EXTRN_RFLAGS \
108 | CPUMCTX_EXTRN_RAX \
109 | CPUMCTX_EXTRN_RSP \
110 | CPUMCTX_EXTRN_SREG_MASK \
111 | CPUMCTX_EXTRN_CR0 \
112 | CPUMCTX_EXTRN_CR2 \
113 | CPUMCTX_EXTRN_CR3 \
114 | CPUMCTX_EXTRN_TABLE_MASK \
115 | CPUMCTX_EXTRN_DR6 \
116 | CPUMCTX_EXTRN_DR7 \
117 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
118 | CPUMCTX_EXTRN_SYSCALL_MSRS \
119 | CPUMCTX_EXTRN_SYSENTER_MSRS \
120 | CPUMCTX_EXTRN_HWVIRT \
121 | CPUMCTX_EXTRN_INHIBIT_INT \
122 | CPUMCTX_EXTRN_HM_SVM_MASK)
123
124/**
125 * Subset of the guest-CPU state that is shared between the guest and host.
126 */
127#define HMSVM_CPUMCTX_SHARED_STATE CPUMCTX_EXTRN_DR_MASK
128
129/** Macro for importing guest state from the VMCB back into CPUMCTX. */
130#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \
131 do { \
132 if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \
133 hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \
134 } while (0)
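/*
 * Usage sketch (illustrative only): CPUMCTX::fExtrn is a lazy-import mask; a set
 * bit means that piece of guest state still lives only in the VMCB.  A #VMEXIT
 * handler that wants to look at, say, RIP and RFLAGS would therefore do:
 *
 *     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     Log(("rip=%RX64\n", pVCpu->cpum.GstCtx.rip));
 *
 * If the requested bits are already clear, the macro skips the comparatively
 * expensive hmR0SvmImportGuestState() call.
 */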
135
136/** Assert that the required state bits are fetched. */
137#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Assert that preemption is disabled or covered by thread-context hooks. */
142#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
143 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
144
145/** Assert that we haven't migrated CPUs when thread-context hooks are not
146 * used. */
147#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
148 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
149 ("Illegal migration! Entered on CPU %u Current %u\n", \
150 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()));
151
152/** Assert that we're not executing a nested-guest. */
153#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
154# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
155#else
156# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
157#endif
158
159/** Assert that we're executing a nested-guest. */
160#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
161# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
162#else
163# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
164#endif
165
166/** Macro for checking and returning from the calling function for
167 * \#VMEXIT intercepts that may be caused while delivering another
168 * event in the guest. */
169#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
170# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
171 do \
172 { \
173 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
174 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
175 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
176 else if ( rc == VINF_EM_RESET \
177 && CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
178 { \
179 HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), HMSVM_CPUMCTX_EXTRN_ALL); \
180 return IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0); \
181 } \
182 else \
183 return rc; \
184 } while (0)
185#else
186# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
187 do \
188 { \
189 int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
190 if (RT_LIKELY(rc == VINF_SUCCESS)) { /* continue #VMEXIT handling */ } \
191 else if ( rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS; } \
192 else \
193 return rc; \
194 } while (0)
195#endif
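/*
 * Usage sketch (illustrative; hmR0SvmExitSomething is a made-up name): a typical
 * #VMEXIT handler runs this check first so that exits raised while another event
 * was being injected are turned into a double fault or reflected before any
 * further handling:
 *
 *     HMSVM_EXIT_DECL hmR0SvmExitSomething(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
 *     {
 *         HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
 *         ... regular exit handling continues here ...
 *     }
 */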
196
197/** Macro for upgrading @a a_rc to VINF_EM_DBG_STEPPED after emulating an
198 * instruction that exited. */
199#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
200 do { \
201 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
202 (a_rc) = VINF_EM_DBG_STEPPED; \
203 } while (0)
204
205/** Validate segment descriptor granularity bit. */
206#ifdef VBOX_STRICT
207# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
208 AssertMsg( !(a_pCtx)->reg.Attr.n.u1Present \
209 || ( (a_pCtx)->reg.Attr.n.u1Granularity \
210 ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
211 : (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
212 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
213 (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
214#else
215# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) do { } while (0)
216#endif
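/*
 * Worked example for the granularity rule above (illustrative): u32Limit holds
 * the expanded limit.  With G=1 the raw 20-bit descriptor limit is in 4 KB units,
 * so the expanded value is (rawLimit << 12) | 0xfff and its low 12 bits must all
 * be set; with G=0 the limit is byte-granular and cannot exceed 0xfffff.
 */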
217
218/**
219 * Exception bitmap mask for all contributory exceptions.
220 *
221 * Page fault is deliberately excluded here as it's conditional as to whether
222 * it's contributory or benign. Page faults are handled separately.
223 */
224#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
225 | RT_BIT(X86_XCPT_DE))
226
227/**
228 * Mandatory/unconditional guest control intercepts.
229 *
230 * SMIs can and do happen in normal operation. We need not intercept them
231 * while executing the guest (or nested-guest).
232 */
233#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS ( SVM_CTRL_INTERCEPT_INTR \
234 | SVM_CTRL_INTERCEPT_NMI \
235 | SVM_CTRL_INTERCEPT_INIT \
236 | SVM_CTRL_INTERCEPT_RDPMC \
237 | SVM_CTRL_INTERCEPT_CPUID \
238 | SVM_CTRL_INTERCEPT_RSM \
239 | SVM_CTRL_INTERCEPT_HLT \
240 | SVM_CTRL_INTERCEPT_IOIO_PROT \
241 | SVM_CTRL_INTERCEPT_MSR_PROT \
242 | SVM_CTRL_INTERCEPT_INVLPGA \
243 | SVM_CTRL_INTERCEPT_SHUTDOWN \
244 | SVM_CTRL_INTERCEPT_FERR_FREEZE \
245 | SVM_CTRL_INTERCEPT_VMRUN \
246 | SVM_CTRL_INTERCEPT_SKINIT \
247 | SVM_CTRL_INTERCEPT_WBINVD \
248 | SVM_CTRL_INTERCEPT_MONITOR \
249 | SVM_CTRL_INTERCEPT_MWAIT \
250 | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
251 | SVM_CTRL_INTERCEPT_XSETBV)
252
253/** @name VMCB Clean Bits.
254 *
255 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
256 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
257 * memory.
258 *
259 * @{ */
260/** All intercepts vectors, TSC offset, PAUSE filter counter. */
261#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
262/** I/O permission bitmap, MSR permission bitmap. */
263#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
264/** ASID. */
265#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
266/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
267 *  V_INTR_VECTOR. */
268#define HMSVM_VMCB_CLEAN_INT_CTRL RT_BIT(3)
269/** Nested Paging: Nested CR3 (nCR3), PAT. */
270#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
271/** Control registers (CR0, CR3, CR4, EFER). */
272#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
273/** Debug registers (DR6, DR7). */
274#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
275/** GDT, IDT limit and base. */
276#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
277/** Segment register: CS, SS, DS, ES limit and base. */
278#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
279/** CR2.*/
280#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
281/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
282#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
283/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
284 *  PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
285#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
286/** Mask of all valid VMCB Clean bits. */
287#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
288 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
289 | HMSVM_VMCB_CLEAN_ASID \
290 | HMSVM_VMCB_CLEAN_INT_CTRL \
291 | HMSVM_VMCB_CLEAN_NP \
292 | HMSVM_VMCB_CLEAN_CRX_EFER \
293 | HMSVM_VMCB_CLEAN_DRX \
294 | HMSVM_VMCB_CLEAN_DT \
295 | HMSVM_VMCB_CLEAN_SEG \
296 | HMSVM_VMCB_CLEAN_CR2 \
297 | HMSVM_VMCB_CLEAN_LBR \
298 | HMSVM_VMCB_CLEAN_AVIC)
299/** @} */
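/*
 * Sketch of the intended clean-bit usage (illustrative): whenever a cached VMCB
 * field is modified, the corresponding clean bit must be cleared so the CPU
 * re-reads that part of the VMCB on the next VMRUN, e.g.:
 *
 *     pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
 *     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 *
 * hmR0SvmSetCtrlIntercept() and friends below encapsulate exactly this pattern.
 */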
300
301/**
302 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
303 */
304typedef enum SVMMSREXITREAD
305{
306 /** Reading this MSR causes a \#VMEXIT. */
307 SVMMSREXIT_INTERCEPT_READ = 0xb,
308 /** Reading this MSR does not cause a \#VMEXIT. */
309 SVMMSREXIT_PASSTHRU_READ
310} SVMMSREXITREAD;
311
312/**
313 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
314 */
315typedef enum SVMMSREXITWRITE
316{
317 /** Writing to this MSR causes a \#VMEXIT. */
318 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
319 /** Writing to this MSR does not cause a \#VMEXIT. */
320 SVMMSREXIT_PASSTHRU_WRITE
321} SVMMSREXITWRITE;
322
323/**
324 * SVM \#VMEXIT handler.
325 *
326 * @returns Strict VBox status code.
327 * @param pVCpu The cross context virtual CPU structure.
328 * @param pSvmTransient Pointer to the SVM-transient structure.
329 */
330typedef VBOXSTRICTRC FNSVMEXITHANDLER(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
331
332
333/*********************************************************************************************************************************
334* Internal Functions *
335*********************************************************************************************************************************/
336static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu);
337static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState);
338
339
340/** @name \#VMEXIT handlers.
341 * @{
342 */
343static FNSVMEXITHANDLER hmR0SvmExitIntr;
344static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
345static FNSVMEXITHANDLER hmR0SvmExitInvd;
346static FNSVMEXITHANDLER hmR0SvmExitCpuid;
347static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
348static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
349static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
350static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
351static FNSVMEXITHANDLER hmR0SvmExitHlt;
352static FNSVMEXITHANDLER hmR0SvmExitMonitor;
353static FNSVMEXITHANDLER hmR0SvmExitMwait;
354static FNSVMEXITHANDLER hmR0SvmExitShutdown;
355static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
356static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
357static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
358static FNSVMEXITHANDLER hmR0SvmExitMsr;
359static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
360static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
361static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
362static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
363static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
364static FNSVMEXITHANDLER hmR0SvmExitVIntr;
365static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
366static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
367static FNSVMEXITHANDLER hmR0SvmExitPause;
368static FNSVMEXITHANDLER hmR0SvmExitFerrFreeze;
369static FNSVMEXITHANDLER hmR0SvmExitIret;
370static FNSVMEXITHANDLER hmR0SvmExitXcptDE;
371static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
372static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
373static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
374static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
375static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
376static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
377static FNSVMEXITHANDLER hmR0SvmExitXcptGP;
378static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
379static FNSVMEXITHANDLER hmR0SvmExitSwInt;
380static FNSVMEXITHANDLER hmR0SvmExitTrRead;
381static FNSVMEXITHANDLER hmR0SvmExitTrWrite;
382#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
383static FNSVMEXITHANDLER hmR0SvmExitClgi;
384static FNSVMEXITHANDLER hmR0SvmExitStgi;
385static FNSVMEXITHANDLER hmR0SvmExitVmload;
386static FNSVMEXITHANDLER hmR0SvmExitVmsave;
387static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
388static FNSVMEXITHANDLER hmR0SvmExitVmrun;
389static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
390static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
391#endif
392/** @} */
393
394static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
395#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
396static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
397#endif
398static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops);
399
400
401/*********************************************************************************************************************************
402* Global Variables *
403*********************************************************************************************************************************/
404/** Ring-0 memory object for the IO bitmap. */
405static RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
406/** Physical address of the IO bitmap. */
407static RTHCPHYS g_HCPhysIOBitmap;
408/** Pointer to the IO bitmap. */
409static R0PTRTYPE(void *) g_pvIOBitmap;
410
411#ifdef VBOX_STRICT
412# define HMSVM_LOG_RBP_RSP RT_BIT_32(0)
413# define HMSVM_LOG_CR_REGS RT_BIT_32(1)
414# define HMSVM_LOG_CS RT_BIT_32(2)
415# define HMSVM_LOG_SS RT_BIT_32(3)
416# define HMSVM_LOG_FS RT_BIT_32(4)
417# define HMSVM_LOG_GS RT_BIT_32(5)
418# define HMSVM_LOG_LBR RT_BIT_32(6)
419# define HMSVM_LOG_ALL ( HMSVM_LOG_RBP_RSP \
420 | HMSVM_LOG_CR_REGS \
421 | HMSVM_LOG_CS \
422 | HMSVM_LOG_SS \
423 | HMSVM_LOG_FS \
424 | HMSVM_LOG_GS \
425 | HMSVM_LOG_LBR)
426
427/**
428 * Dumps the virtual CPU state and additional info to the logger for diagnostics.
429 *
430 * @param pVCpu The cross context virtual CPU structure.
431 * @param pVmcb Pointer to the VM control block.
432 * @param pszPrefix Log prefix.
433 * @param fFlags Log flags, see HMSVM_LOG_XXX.
434 * @param uVerbose The verbosity level, currently unused.
435 */
436static void hmR0SvmLogState(PVMCPUCC pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose)
437{
438 RT_NOREF2(pVCpu, uVerbose);
439 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
440
441 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
442 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));
443
444 if (fFlags & HMSVM_LOG_RBP_RSP)
445 {
446 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
447 Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
448 }
449
450 if (fFlags & HMSVM_LOG_CR_REGS)
451 {
452 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
453 Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
454 }
455
456 if (fFlags & HMSVM_LOG_CS)
457 {
458 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
459 Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
460 pCtx->cs.u32Limit, pCtx->cs.Attr.u));
461 }
462 if (fFlags & HMSVM_LOG_SS)
463 {
464 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
465 Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
466 pCtx->ss.u32Limit, pCtx->ss.Attr.u));
467 }
468 if (fFlags & HMSVM_LOG_FS)
469 {
470 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
471 Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
472 pCtx->fs.u32Limit, pCtx->fs.Attr.u));
473 }
474 if (fFlags & HMSVM_LOG_GS)
475 {
476 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
477 Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
478 pCtx->gs.u32Limit, pCtx->gs.Attr.u));
479 }
480
481 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
482 if (fFlags & HMSVM_LOG_LBR)
483 {
484 Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
485 pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
486 }
487 NOREF(pszPrefix); NOREF(pVmcbGuest); NOREF(pCtx);
488}
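/*
 * Usage sketch (illustrative; the prefix string is arbitrary): dump CS/SS and the
 * control registers while chasing a state corruption:
 *
 *     hmR0SvmLogState(pVCpu, pVmcb, "PreRun", HMSVM_LOG_CS | HMSVM_LOG_SS | HMSVM_LOG_CR_REGS, 0);
 *
 * The verbosity argument is currently unused.
 */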
489#endif /* VBOX_STRICT */
490
491
492/**
493 * Sets up and activates AMD-V on the current CPU.
494 *
495 * @returns VBox status code.
496 * @param pHostCpu The HM physical-CPU structure.
497 * @param pVM The cross context VM structure. Can be
498 * NULL after a resume!
499 * @param pvCpuPage Pointer to the global CPU page.
500 * @param HCPhysCpuPage Physical address of the global CPU page.
501 * @param fEnabledByHost Whether the host OS has already initialized AMD-V.
502 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs (currently
503 * unused).
504 */
505VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
506 PCSUPHWVIRTMSRS pHwvirtMsrs)
507{
508 Assert(!fEnabledByHost);
509 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
510 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
511 Assert(pvCpuPage); NOREF(pvCpuPage);
512 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
513
514 RT_NOREF2(fEnabledByHost, pHwvirtMsrs);
515
516 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
517 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
518
519 /*
520 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
521 */
522 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
523 if (u64HostEfer & MSR_K6_EFER_SVME)
524 {
525 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
526 if ( pVM
527 && pVM->hm.s.svm.fIgnoreInUseError)
528 pHostCpu->fIgnoreAMDVInUseError = true;
529
530 if (!pHostCpu->fIgnoreAMDVInUseError)
531 {
532 ASMSetFlags(fEFlags);
533 return VERR_SVM_IN_USE;
534 }
535 }
536
537 /* Turn on AMD-V in the EFER MSR. */
538 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
539
540 /* Write the physical page address where the CPU will store the host state while executing the VM. */
541 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
542
543 /* Restore interrupts. */
544 ASMSetFlags(fEFlags);
545
546 /*
547 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all
548 * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all
549 * ASIDs (flushing is done upon VMRUN). Therefore, flag that we need to flush the TLB
550 * entirely before executing any guest code.
551 */
552 pHostCpu->fFlushAsidBeforeUse = true;
553
554 /*
555 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
556 */
557 ++pHostCpu->cTlbFlushes;
558
559 return VINF_SUCCESS;
560}
561
562
563/**
564 * Deactivates AMD-V on the current CPU.
565 *
566 * @returns VBox status code.
567 * @param pHostCpu The HM physical-CPU structure.
568 * @param pvCpuPage Pointer to the global CPU page.
569 * @param HCPhysCpuPage Physical address of the global CPU page.
570 */
571VMMR0DECL(int) SVMR0DisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
572{
573 RT_NOREF1(pHostCpu);
574 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
575 AssertReturn( HCPhysCpuPage
576 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
577 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
578
579 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
580 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
581
582 /* Turn off AMD-V in the EFER MSR. */
583 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
584 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
585
586 /* Invalidate host state physical address. */
587 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
588
589 /* Restore interrupts. */
590 ASMSetFlags(fEFlags);
591
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Does global AMD-V initialization (called during module initialization).
598 *
599 * @returns VBox status code.
600 */
601VMMR0DECL(int) SVMR0GlobalInit(void)
602{
603 /*
604 * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
605 * intercept all IO accesses, it's done once globally here instead of per-VM.
606 */
607 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
608 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT,
609 NIL_RTHCPHYS /*PhysHighest*/, false /* fExecutable */);
610 if (RT_FAILURE(rc))
611 return rc;
612
613 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
614 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
615
616 /* Set all bits to intercept all IO accesses. */
617 ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
618
619 return VINF_SUCCESS;
620}
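/*
 * Layout note with a small example (illustrative; the AMD APM is authoritative):
 * the IOPM carries one intercept bit per I/O port for ports 0x0000-0xffff, i.e.
 * 8 KB of bits padded out to the 12 KB (3 page) structure SVM requires.  A port
 * maps to byte (uPort / 8), bit (uPort % 8).  Since every bit is set above, any
 * IN/OUT (or string variant) causes an I/O #VMEXIT handled by hmR0SvmExitIOInstr().
 */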
621
622
623/**
624 * Does global AMD-V termination (called during module termination).
625 */
626VMMR0DECL(void) SVMR0GlobalTerm(void)
627{
628 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
629 {
630 RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
631 g_pvIOBitmap = NULL;
632 g_HCPhysIOBitmap = 0;
633 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
634 }
635}
636
637
638/**
639 * Frees any allocated per-VCPU structures for a VM.
640 *
641 * @param pVM The cross context VM structure.
642 */
643DECLINLINE(void) hmR0SvmFreeStructs(PVMCC pVM)
644{
645 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
646 {
647 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
648 AssertPtr(pVCpu);
649
650 if (pVCpu->hmr0.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
651 {
652 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcbHost, false);
653 pVCpu->hmr0.s.svm.HCPhysVmcbHost = 0;
654 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
655 }
656
657 if (pVCpu->hmr0.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
658 {
659 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcb, false);
660 pVCpu->hmr0.s.svm.pVmcb = NULL;
661 pVCpu->hmr0.s.svm.HCPhysVmcb = 0;
662 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
663 }
664
665 if (pVCpu->hmr0.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
666 {
667 RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, false);
668 pVCpu->hmr0.s.svm.pvMsrBitmap = NULL;
669 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = 0;
670 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
671 }
672 }
673}
674
675
676/**
677 * Sets pfnVMRun to the best suited variant.
678 *
679 * This must be called whenever anything changes relative to the SVMR0VMRun
680 * variant selection:
681 * - pVCpu->hm.s.fLoadSaveGuestXcr0
682 * - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
683 * - CPUMCTX_WSF_IBPB_EXIT in pVCpu->cpum.GstCtx.fWorldSwitcher
684 * - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
685 * - Perhaps: CPUMCTX.fXStateMask (windows only)
686 *
687 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
688 * CPUMCTX_WSF_IBPB_EXIT can be changed at runtime.
689 */
690static void hmR0SvmUpdateVmRunFunction(PVMCPUCC pVCpu)
691{
692 static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
693 {
694 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
695 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
696 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
697 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
698 { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
699 { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
700 { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
701 { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
702 };
703 uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
704 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
705 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 4 : 0);
706 PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
707 if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
708 pVCpu->hmr0.s.svm.pfnVMRun = pfnVMRun;
709}
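/*
 * Example of the index calculation above (illustrative): a VCPU that needs XCR0
 * load/save and an IBPB on VM-entry, but no IBPB on VM-exit, yields
 *
 *     idx = 1 (Xcr0) | 2 (IbpbEntry) | 0 (IbpbExit) = 3
 *
 * which selects hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit from the table.
 */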
710
711
712/**
713 * Selector FNHMSVMVMRUN implementation.
714 */
715static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
716{
717 hmR0SvmUpdateVmRunFunction(pVCpu);
718 return pVCpu->hmr0.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
719}
720
721
722/**
723 * Does per-VM AMD-V initialization.
724 *
725 * @returns VBox status code.
726 * @param pVM The cross context VM structure.
727 */
728VMMR0DECL(int) SVMR0InitVM(PVMCC pVM)
729{
730 int rc = VERR_INTERNAL_ERROR_5;
731
732 /*
733 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
734 */
735 uint32_t u32Family;
736 uint32_t u32Model;
737 uint32_t u32Stepping;
738 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
739 {
740 Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
741 pVM->hmr0.s.svm.fAlwaysFlushTLB = true;
742 }
743
744 /*
745 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
746 */
747 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
748 {
749 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
750 pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
751 pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
752 pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
753 }
754
755 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
756 {
757 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
758
759 /*
760 * Initialize the hardware-assisted SVM guest-execution handler.
761 * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
762 */
763 pVCpu->hmr0.s.svm.pfnVMRun = hmR0SvmVMRunSelector;
764
765 /*
766 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
767 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
768 */
769 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << HOST_PAGE_SHIFT,
770 NIL_RTHCPHYS /*PhysHighest*/, false /* fExecutable */);
771 if (RT_FAILURE(rc))
772 goto failure_cleanup;
773
774 void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcbHost);
775 pVCpu->hmr0.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcbHost, 0 /* iPage */);
776 RT_BZERO(pvVmcbHost, HOST_PAGE_SIZE);
777
778 /*
779 * Allocate one page for the guest-state VMCB.
780 */
781 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << HOST_PAGE_SHIFT,
782 NIL_RTHCPHYS /*PhysHighest*/, false /* fExecutable */);
783 if (RT_FAILURE(rc))
784 goto failure_cleanup;
785
786 pVCpu->hmr0.s.svm.pVmcb = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcb);
787 pVCpu->hmr0.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcb, 0 /* iPage */);
788 RT_BZERO(pVCpu->hmr0.s.svm.pVmcb, HOST_PAGE_SIZE);
789
790 /*
791 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
792 * SVM to not require one.
793 */
794 rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT,
795 NIL_RTHCPHYS /*PhysHighest*/, false /* fExecutable */);
796 if (RT_FAILURE(rc))
797 goto failure_cleanup;
798
799 pVCpu->hmr0.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjMsrBitmap);
800 pVCpu->hmr0.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
801 /* Set all bits to intercept all MSR accesses (changed later on). */
802 ASMMemFill32(pVCpu->hmr0.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << HOST_PAGE_SHIFT, UINT32_C(0xffffffff));
803 }
804
805 return VINF_SUCCESS;
806
807failure_cleanup:
808 hmR0SvmFreeStructs(pVM);
809 return rc;
810}
811
812
813/**
814 * Does per-VM AMD-V termination.
815 *
816 * @returns VBox status code.
817 * @param pVM The cross context VM structure.
818 */
819VMMR0DECL(int) SVMR0TermVM(PVMCC pVM)
820{
821 hmR0SvmFreeStructs(pVM);
822 return VINF_SUCCESS;
823}
824
825
826/**
827 * Returns whether the VMCB Clean Bits feature is supported.
828 *
829 * @returns @c true if supported, @c false otherwise.
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param fIsNestedGuest Whether we are currently executing the nested-guest.
832 */
833DECL_FORCE_INLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPUCC pVCpu, bool fIsNestedGuest)
834{
835 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
836 bool const fHostVmcbCleanBits = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
837 if (!fIsNestedGuest)
838 return fHostVmcbCleanBits;
839 return fHostVmcbCleanBits && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
840}
841
842
843/**
844 * Returns whether the decode assists feature is supported.
845 *
846 * @returns @c true if supported, @c false otherwise.
847 * @param pVCpu The cross context virtual CPU structure.
848 */
849DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPUCC pVCpu)
850{
851 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
852#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
853 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
854 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
855 && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
856#endif
857 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
858}
859
860
861/**
862 * Returns whether the NRIP_SAVE feature is supported.
863 *
864 * @returns @c true if supported, @c false otherwise.
865 * @param pVCpu The cross context virtual CPU structure.
866 */
867DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPUCC pVCpu)
868{
869 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
870#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
871 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
872 return (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
873 && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
874#endif
875 return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
876}
877
878
879/**
880 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
881 *
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param pbMsrBitmap Pointer to the MSR bitmap.
884 * @param idMsr The MSR for which the permissions are being set.
885 * @param enmRead MSR read permissions.
886 * @param enmWrite MSR write permissions.
887 *
888 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
889 * caller needs to take care of this.
890 */
891static void hmR0SvmSetMsrPermission(PVMCPUCC pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
892 SVMMSREXITWRITE enmWrite)
893{
894 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
895 uint16_t offMsrpm;
896 uint8_t uMsrpmBit;
897 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
898 AssertRC(rc);
899
900 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
901 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
902
903 pbMsrBitmap += offMsrpm;
904 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
905 *pbMsrBitmap |= RT_BIT(uMsrpmBit);
906 else
907 {
908 if (!fInNestedGuestMode)
909 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
910#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
911 else
912 {
913 /* Only clear the bit if the nested-guest is also not intercepting the MSR read.*/
914 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit)))
915 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
916 else
917 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit));
918 }
919#endif
920 }
921
922 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
923 *pbMsrBitmap |= RT_BIT(uMsrpmBit + 1);
924 else
925 {
926 if (!fInNestedGuestMode)
927 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
928#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
929 else
930 {
931 /* Only clear the bit if the nested-guest is also not intercepting the MSR write.*/
932 if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit + 1)))
933 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
934 else
935 Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
936 }
937#endif
938 }
939}
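/*
 * Usage sketch (illustrative; assumes pVmcb points to the VCPU's VMCB): grant the
 * guest direct access to a world-switched MSR, then invalidate the cached MSRPM
 * state as noted in the remarks above:
 *
 *     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
 *     hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_GS_BASE,
 *                             SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
 *     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
 *
 * SVMR0SetupVM() below uses the same pattern for the SYSCALL/SYSENTER and
 * FS/GS base MSRs.
 */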
940
941
942/**
943 * Sets up AMD-V for the specified VM.
944 * This function is only called once per-VM during initialization.
945 *
946 * @returns VBox status code.
947 * @param pVM The cross context VM structure.
948 */
949VMMR0DECL(int) SVMR0SetupVM(PVMCC pVM)
950{
951 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
952 AssertReturn(pVM, VERR_INVALID_PARAMETER);
953
954 /*
955 * Validate and copy over some parameters.
956 */
957 AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
958 bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
959 AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
960 pVM->hmr0.s.fNestedPaging = fNestedPaging;
961 pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
962
963 /*
964 * Determine some configuration parameters.
965 */
966 bool const fPauseFilter = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
967 bool const fPauseFilterThreshold = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
968 bool const fUsePauseFilter = fPauseFilter && pVM->hm.s.svm.cPauseFilter;
969
970 bool const fLbrVirt = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
971 bool const fUseLbrVirt = fLbrVirt && pVM->hm.s.svm.fLbrVirt; /** @todo IEM implementation etc. */
972
973#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
974 bool const fVirtVmsaveVmload = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
975 bool const fUseVirtVmsaveVmload = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && fNestedPaging;
976
977 bool const fVGif = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
978 bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
979#endif
980
981 PVMCPUCC pVCpu0 = VMCC_GET_CPU_0(pVM);
982 PSVMVMCB pVmcb0 = pVCpu0->hmr0.s.svm.pVmcb;
983 AssertMsgReturn(RT_VALID_PTR(pVmcb0), ("Invalid pVmcb (%p) for vcpu[0]\n", pVmcb0), VERR_SVM_INVALID_PVMCB);
984 PSVMVMCBCTRL pVmcbCtrl0 = &pVmcb0->ctrl;
985
986 /* Always trap #AC for reasons of security. */
987 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
988
989 /* Always trap #DB for reasons of security. */
990 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
991
992 /* Trap exceptions unconditionally (debug purposes). */
993#ifdef HMSVM_ALWAYS_TRAP_PF
994 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_PF);
995#endif
996#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
997 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
998 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_BP)
999 | RT_BIT_32(X86_XCPT_DE)
1000 | RT_BIT_32(X86_XCPT_NM)
1001 | RT_BIT_32(X86_XCPT_UD)
1002 | RT_BIT_32(X86_XCPT_NP)
1003 | RT_BIT_32(X86_XCPT_SS)
1004 | RT_BIT_32(X86_XCPT_GP)
1005 | RT_BIT_32(X86_XCPT_PF)
1006 | RT_BIT_32(X86_XCPT_MF)
1007 ;
1008#endif
1009
1010 /* Apply the exceptions intercepts needed by the GIM provider. */
1011 if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1012 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
1013
1014 /* Apply the exceptions intercepts needed by the GCM fixers. */
1015 if (pVCpu0->hm.s.fGCMTrapXcptDE)
1016 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_DE);
1017
1018 /* The mesa 3d driver hack needs #GP. */
1019 if (pVCpu0->hm.s.fTrapXcptGpForLovelyMesaDrv)
1020 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_GP);
1021
1022 /* Set up unconditional intercepts and conditions. */
1023 pVmcbCtrl0->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
1024 | SVM_CTRL_INTERCEPT_VMMCALL
1025 | SVM_CTRL_INTERCEPT_VMSAVE
1026 | SVM_CTRL_INTERCEPT_VMLOAD
1027 | SVM_CTRL_INTERCEPT_CLGI
1028 | SVM_CTRL_INTERCEPT_STGI;
1029
1030#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
1031 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
1032#endif
1033
1034#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1035 if (pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm)
1036 {
1037 /* Virtualized VMSAVE/VMLOAD. */
1038 if (fUseVirtVmsaveVmload)
1039 {
1040 pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload = 1;
1041 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_VMSAVE
1042 | SVM_CTRL_INTERCEPT_VMLOAD);
1043 }
1044 else
1045 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1046
1047 /* Virtual GIF. */
1048 if (fUseVGif)
1049 {
1050 pVmcbCtrl0->IntCtrl.n.u1VGifEnable = 1;
1051 pVmcbCtrl0->u64InterceptCtrl &= ~( SVM_CTRL_INTERCEPT_CLGI
1052 | SVM_CTRL_INTERCEPT_STGI);
1053 }
1054 else
1055 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1056 }
1057 else
1058#endif
1059 {
1060 Assert(!pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm);
1061 Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
1062 Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
1063 }
1064
1065 /* CR4 writes must always be intercepted for tracking PGM mode changes and
1066 AVX (for XCR0 syncing during world switching). */
1067 pVmcbCtrl0->u16InterceptWrCRx = RT_BIT(4);
1068
1069 /* Intercept all DRx reads and writes by default. Changed later on. */
1070 pVmcbCtrl0->u16InterceptRdDRx = 0xffff;
1071 pVmcbCtrl0->u16InterceptWrDRx = 0xffff;
1072
1073 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
1074 pVmcbCtrl0->IntCtrl.n.u1VIntrMasking = 1;
1075
1076 /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
1077 and we currently deliver both PIC and APIC interrupts alike, see hmR0SvmEvaluatePendingEvent() */
1078 pVmcbCtrl0->IntCtrl.n.u1IgnoreTPR = 1;
1079
1080 /* Set the IO permission bitmap physical addresses. */
1081 pVmcbCtrl0->u64IOPMPhysAddr = g_HCPhysIOBitmap;
1082
1083 /* LBR virtualization. */
1084 pVmcbCtrl0->LbrVirt.n.u1LbrVirt = fUseLbrVirt;
1085
1086 /* The host uses ASID 0, so the guest VMCB ASID must be non-zero; start the guest with ASID 1. */
1087 pVmcbCtrl0->TLBCtrl.n.u32ASID = 1;
1088
1089 /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
1090 pVmcbCtrl0->NestedPagingCtrl.n.u1NestedPaging = fNestedPaging;
1091
1092 /* Without Nested Paging, we need additional intercepts. */
1093 if (!fNestedPaging)
1094 {
1095 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
1096 pVmcbCtrl0->u16InterceptRdCRx |= RT_BIT(3);
1097 pVmcbCtrl0->u16InterceptWrCRx |= RT_BIT(3);
1098
1099 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
1100 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
1101 | SVM_CTRL_INTERCEPT_TASK_SWITCH;
1102
1103 /* Page faults must be intercepted to implement shadow paging. */
1104 pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
1105 }
1106
1107 /* Workaround for missing OS/2 TLB flush, see ticketref:20625. */
1108 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
1109 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TR_WRITES;
1110
1111 /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
1112 if (fUsePauseFilter)
1113 {
1114 Assert(pVM->hm.s.svm.cPauseFilter > 0);
1115 pVmcbCtrl0->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
1116 if (fPauseFilterThreshold)
1117 pVmcbCtrl0->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
1118 pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
1119 }
1120
1121 /*
1122 * Setup the MSR permission bitmap.
1123 * The following MSRs are saved/restored automatically during the world-switch.
1124 * Don't intercept guest read/write accesses to these MSRs.
1125 */
1126 uint8_t *pbMsrBitmap0 = (uint8_t *)pVCpu0->hmr0.s.svm.pvMsrBitmap;
1127 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1128 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1129 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1130 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1131 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1132 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1133 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1134 if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
1135 {
1136 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1137 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1138 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1139 }
1140 else
1141 {
1142 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1143 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1144 hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
1145 }
1146 pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hmr0.s.svm.HCPhysMsrBitmap;
1147
1148 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1149 Assert(pVmcbCtrl0->u32VmcbCleanBits == 0);
1150
1151 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
1152 {
1153 PVMCPUCC pVCpuCur = VMCC_GET_CPU(pVM, idCpu);
1154 PSVMVMCB pVmcbCur = pVCpuCur->hmr0.s.svm.pVmcb;
1155 AssertMsgReturn(RT_VALID_PTR(pVmcbCur), ("Invalid pVmcb (%p) for vcpu[%u]\n", pVmcbCur, idCpu), VERR_SVM_INVALID_PVMCB);
1156 PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;
1157
1158 /* Copy the VMCB control area. */
1159 memcpy(pVmcbCtrlCur, pVmcbCtrl0, sizeof(*pVmcbCtrlCur));
1160
1161 /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
1162 uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hmr0.s.svm.pvMsrBitmap;
1163 memcpy(pbMsrBitmapCur, pbMsrBitmap0, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1164 pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hmr0.s.svm.HCPhysMsrBitmap;
1165
1166 /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
1167 Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
1168
1169 /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
1170 Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu0->hm.s.fGIMTrapXcptUD);
1171 /* Same for GCM, #DE trapping should be uniform across VCPUs. */
1172 Assert(pVCpuCur->hm.s.fGCMTrapXcptDE == pVCpu0->hm.s.fGCMTrapXcptDE);
1173 }
1174
1175#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1176 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool fUseVGif=%RTbool fUseVirtVmsaveVmload=%RTbool\n", fUsePauseFilter,
1177 fUseLbrVirt, fUseVGif, fUseVirtVmsaveVmload));
1178#else
1179 LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool\n", fUsePauseFilter, fUseLbrVirt));
1180#endif
1181 return VINF_SUCCESS;
1182}
1183
1184
1185/**
1186 * Gets a pointer to the currently active guest (or nested-guest) VMCB.
1187 *
1188 * @returns Pointer to the current context VMCB.
1189 * @param pVCpu The cross context virtual CPU structure.
1190 */
1191DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPUCC pVCpu)
1192{
1193#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1194 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1195 return &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
1196#endif
1197 return pVCpu->hmr0.s.svm.pVmcb;
1198}
1199
1200
1201/**
1202 * Gets a pointer to the nested-guest VMCB cache.
1203 *
1204 * @returns Pointer to the nested-guest VMCB cache.
1205 * @param pVCpu The cross context virtual CPU structure.
1206 */
1207DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPUCC pVCpu)
1208{
1209#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1210 Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
1211 return &pVCpu->hm.s.svm.NstGstVmcbCache;
1212#else
1213 RT_NOREF(pVCpu);
1214 return NULL;
1215#endif
1216}
1217
1218
1219/**
1220 * Invalidates a guest page by guest virtual address.
1221 *
1222 * @returns VBox status code.
1223 * @param pVCpu The cross context virtual CPU structure.
1224 * @param GCVirt Guest virtual address of the page to invalidate.
1225 */
1226VMMR0DECL(int) SVMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
1227{
1228 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
1229
1230 bool const fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH) || pVCpu->CTX_SUFF(pVM)->hmr0.s.svm.fAlwaysFlushTLB;
1231
1232 /* Skip it if a TLB flush is already pending. */
1233 if (!fFlushPending)
1234 {
1235 Log4Func(("%#RGv\n", GCVirt));
1236
1237 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
1238 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
1239
1240 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
1241 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1242 }
1243 return VINF_SUCCESS;
1244}
1245
1246
1247/**
1248 * Flushes the appropriate tagged-TLB entries.
1249 *
1250 * @param pHostCpu The HM physical-CPU structure.
1251 * @param pVCpu The cross context virtual CPU structure.
1252 * @param pVmcb Pointer to the VM control block.
1253 */
1254static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1255{
1256 /*
1257 * Force a TLB flush for the first world switch if the current CPU differs from the one
1258 * we ran on last. This can happen both for start & resume due to long jumps back to
1259 * ring-3.
1260 *
1261 * We also force a TLB flush every time when executing a nested-guest VCPU as there is no
1262 * correlation between it and the physical CPU.
1263 *
1264 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while
1265 * flushing the TLB, so we cannot reuse the ASIDs without flushing.
1266 */
1267 bool fNewAsid = false;
1268 Assert(pHostCpu->idCpu != NIL_RTCPUID);
1269 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
1270 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes
1271#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1272 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
1273#endif
1274 )
1275 {
1276 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1277 pVCpu->hmr0.s.fForceTLBFlush = true;
1278 fNewAsid = true;
1279 }
1280
1281 /* Set TLB flush state as checked until we return from the world switch. */
1282 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
1283
1284 /* Check for explicit TLB flushes. */
1285 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1286 {
1287 pVCpu->hmr0.s.fForceTLBFlush = true;
1288 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1289 }
1290
1291 /*
1292 * If the CPU is affected by AMD erratum 170, we need to flush the entire TLB for each world switch. Sad.
1293 * This host CPU requirement takes precedence.
1294 */
1295 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1296 if (pVM->hmr0.s.svm.fAlwaysFlushTLB)
1297 {
1298 pHostCpu->uCurrentAsid = 1;
1299 pVCpu->hmr0.s.uCurrentAsid = 1;
1300 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1301 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1302 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1303
1304 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1305 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1306 }
1307 else
1308 {
1309 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
1310 if (pVCpu->hmr0.s.fForceTLBFlush)
1311 {
1312 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1313 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1314
1315 if (fNewAsid)
1316 {
1317 ++pHostCpu->uCurrentAsid;
1318
1319 bool fHitASIDLimit = false;
1320 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
1321 {
1322 pHostCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
1323 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
1324 fHitASIDLimit = true;
1325 }
1326
1327 if ( fHitASIDLimit
1328 || pHostCpu->fFlushAsidBeforeUse)
1329 {
1330 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1331 pHostCpu->fFlushAsidBeforeUse = false;
1332 }
1333
1334 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
1335 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1336 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1337 }
1338 else
1339 {
1340 if (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
1341 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
1342 else
1343 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1344 }
1345
1346 pVCpu->hmr0.s.fForceTLBFlush = false;
1347 }
1348 }
1349
1350 /* Update VMCB with the ASID. */
1351 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hmr0.s.uCurrentAsid)
1352 {
1353 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hmr0.s.uCurrentAsid;
1354 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
1355 }
1356
1357 AssertMsg(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu,
1358 ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hmr0.s.idLastCpu, pHostCpu->idCpu));
1359 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
1360 ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
1361 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
1362 ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
1363 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
1364 ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
1365
1366#ifdef VBOX_WITH_STATISTICS
1367 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
1368 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1369 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
1370 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
1371 {
1372 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1373 }
1374 else
1375 {
1376 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1377 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1378 }
1379#endif
1380}
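/*
 * Worked example of the ASID scheme above (illustrative): with g_uHmMaxAsid = 8,
 * a host CPU hands out ASIDs 1..7 and then wraps back to 1; the wrap bumps
 * cTlbFlushes and forces SVM_TLB_FLUSH_ENTIRE, so translations tagged with a
 * recycled ASID can never be hit.  A VCPU is also given a fresh ASID whenever it
 * is rescheduled onto a different host CPU (fNewAsid above).
 */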
1381
1382
1383/**
1384 * Sets an exception intercept in the specified VMCB.
1385 *
1386 * @param pVmcb Pointer to the VM control block.
1387 * @param uXcpt The exception (X86_XCPT_*).
1388 */
1389DECLINLINE(void) hmR0SvmSetXcptIntercept(PSVMVMCB pVmcb, uint8_t uXcpt)
1390{
1391 if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt)))
1392 {
1393 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
1394 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1395 }
1396}
1397
1398
1399/**
1400 * Clears an exception intercept in the specified VMCB.
1401 *
1402 * @param pVCpu The cross context virtual CPU structure.
1403 * @param pVmcb Pointer to the VM control block.
1404 * @param uXcpt The exception (X86_XCPT_*).
1405 *
1406 * @remarks This takes into account if we're executing a nested-guest and only
1407 * removes the exception intercept if both the guest -and- nested-guest
1408 * are not intercepting it.
1409 */
1410DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt)
1411{
1412 Assert(uXcpt != X86_XCPT_DB);
1413 Assert(uXcpt != X86_XCPT_AC);
1414 Assert(uXcpt != X86_XCPT_GP);
1415#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1416 if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt))
1417 {
1418 bool fRemove = true;
1419# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1420 /* Only remove the intercept if the nested-guest is also not intercepting it! */
1421 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1422 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1423 {
1424 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1425 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
1426 }
1427# else
1428 RT_NOREF(pVCpu);
1429# endif
1430 if (fRemove)
1431 {
1432 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(uXcpt);
1433 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1434 }
1435 }
1436#else
1437 RT_NOREF3(pVCpu, pVmcb, uXcpt);
1438#endif
1439}
1440
1441
1442/**
1443 * Sets a control intercept in the specified VMCB.
1444 *
1445 * @param pVmcb Pointer to the VM control block.
1446 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1447 */
1448DECLINLINE(void) hmR0SvmSetCtrlIntercept(PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1449{
1450 if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
1451 {
1452 pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
1453 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1454 }
1455}
1456
1457
1458/**
1459 * Clears a control intercept in the specified VMCB.
1460 *
1461 * @returns @c true if the intercept is still set, @c false otherwise.
1462 * @param pVCpu The cross context virtual CPU structure.
1463 * @param pVmcb Pointer to the VM control block.
1464 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1465 *
1466 * @remarks This takes into account if we're executing a nested-guest and only
1467 * removes the control intercept if both the guest -and- nested-guest
1468 * are not intercepting it.
1469 */
1470static bool hmR0SvmClearCtrlIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1471{
1472 if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
1473 {
1474 bool fRemove = true;
1475#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1476 /* Only remove the control intercept if the nested-guest is also not intercepting it! */
1477 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1478 {
1479 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1480 fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
1481 }
1482#else
1483 RT_NOREF(pVCpu);
1484#endif
1485 if (fRemove)
1486 {
1487 pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
1488 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1489 }
1490 }
1491
1492 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
1493}
1494
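/*
 * Illustrative sketch only (kept out of the build with "#if 0"): how a caller might pair the
 * control-intercept helpers above. The helper name is hypothetical and SVM_CTRL_INTERCEPT_PAUSE
 * is used purely as an example bit; the point is that hmR0SvmClearCtrlIntercept() reports
 * whether the intercept must stay active because the nested-guest still wants it.
 */
#if 0
static void hmR0SvmExampleTogglePauseIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, bool fIntercept)
{
    if (fIntercept)
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_PAUSE);
    else if (hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_PAUSE))
    {
        /* Still set: the nested-guest also intercepts PAUSE, so the intercept remains active. */
    }
}
#endif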
1495
1496/**
1497 * Exports the guest (or nested-guest) CR0 into the VMCB.
1498 *
1499 * @param pVCpu The cross context virtual CPU structure.
1500 * @param pVmcb Pointer to the VM control block.
1501 *
1502 * @remarks This assumes we always pre-load the guest FPU.
1503 * @remarks No-long-jump zone!!!
1504 */
1505static void hmR0SvmExportGuestCR0(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1506{
1507 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1508
1509 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1510 uint64_t const uGuestCr0 = pCtx->cr0;
1511 uint64_t uShadowCr0 = uGuestCr0;
1512
1513 /* Always enable caching. */
1514 uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1515
1516 /* When Nested Paging is not available use shadow page tables and intercept #PFs (latter done in SVMR0SetupVM()). */
1517 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1518 {
1519 uShadowCr0 |= X86_CR0_PG /* Use shadow page tables. */
1520 | X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1521 }
1522
1523 /*
1524 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that
1525 * let us isolate the host from it, IEM/REM still needs work to emulate it properly,
1526 * see @bugref{7243#c103}.
1527 */
1528 if (!(uGuestCr0 & X86_CR0_NE))
1529 {
1530 uShadowCr0 |= X86_CR0_NE;
1531 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_MF);
1532 }
1533 else
1534 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF);
1535
1536 /*
1537 * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
1538 *
1539 * CR0 writes still need interception as PGM requires tracking paging mode changes,
1540 * see @bugref{6944}.
1541 *
1542 * We also don't ever want to honor weird things like cache disable from the guest.
1543 * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0
1544 * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
1545 */
1546 if (uShadowCr0 == uGuestCr0)
1547 {
1548 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1549 {
1550 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
1551 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
1552 Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
1553 }
1554 else
1555 {
1556 /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
1557 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1558 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
1559 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));
1560 pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx & ~RT_BIT(0))
1561 | (pVmcbNstGstCache->u16InterceptWrCRx & RT_BIT(0));
1562 }
1563 }
1564 else
1565 {
1566 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
1567 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
1568 }
1569 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1570
1571 Assert(!RT_HI_U32(uShadowCr0));
1572 if (pVmcb->guest.u64CR0 != uShadowCr0)
1573 {
1574 pVmcb->guest.u64CR0 = uShadowCr0;
1575 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1576 }
1577}
1578
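/*
 * Minimal sketch (not compiled) of the shadow CR0 derivation performed above for the
 * no-nested-paging case; the helper name is hypothetical and the input is assumed.
 */
#if 0
static uint64_t hmR0SvmExampleShadowCr0NoNestedPaging(uint64_t uGuestCr0)
{
    uint64_t uShadowCr0 = uGuestCr0 & ~(uint64_t)(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
    uShadowCr0 |= X86_CR0_PG | X86_CR0_WP;  /* Shadow page tables: force paging and write-protect. */
    if (!(uGuestCr0 & X86_CR0_NE))
        uShadowCr0 |= X86_CR0_NE;           /* Legacy #MF-style FPU error reporting (with #MF intercepted). */
    return uShadowCr0;
}
#endif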
1579
1580/**
1581 * Exports the guest (or nested-guest) CR3 into the VMCB.
1582 *
1583 * @param pVCpu The cross context virtual CPU structure.
1584 * @param pVmcb Pointer to the VM control block.
1585 *
1586 * @remarks No-long-jump zone!!!
1587 */
1588static void hmR0SvmExportGuestCR3(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1589{
1590 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1591
1592 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1593 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1594 if (pVM->hmr0.s.fNestedPaging)
1595 {
1596 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetHyperCR3(pVCpu);
1597 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1598 pVmcb->guest.u64CR3 = pCtx->cr3;
1599 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1600 }
1601 else
1602 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1603
1604 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1605}
1606
1607
1608/**
1609 * Exports the guest (or nested-guest) CR4 into the VMCB.
1610 *
1611 * @param pVCpu The cross context virtual CPU structure.
1612 * @param pVmcb Pointer to the VM control block.
1613 *
1614 * @remarks No-long-jump zone!!!
1615 */
1616static int hmR0SvmExportGuestCR4(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1617{
1618 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1619
1620 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1621 uint64_t uShadowCr4 = pCtx->cr4;
1622 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1623 {
1624 switch (pVCpu->hm.s.enmShadowMode)
1625 {
1626 case PGMMODE_REAL:
1627 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1628 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1629
1630 case PGMMODE_32_BIT: /* 32-bit paging. */
1631 uShadowCr4 &= ~X86_CR4_PAE;
1632 break;
1633
1634 case PGMMODE_PAE: /* PAE paging. */
1635 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1636 /* Must use PAE paging as we could use physical memory > 4 GB. */
1637 uShadowCr4 |= X86_CR4_PAE;
1638 break;
1639
1640 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1641 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1642#ifdef VBOX_WITH_64_BITS_GUESTS
1643 break;
1644#else
1645 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1646#endif
1647
1648 default: /* shut up gcc */
1649 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1650 }
1651 }
1652
1653 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1654 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1655 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
1656 {
1657 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
1658 hmR0SvmUpdateVmRunFunction(pVCpu);
1659 }
1660
1661 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
1662 if (uShadowCr4 == pCtx->cr4)
1663 {
1664 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1665 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
1666 else
1667 {
1668 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
1669 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1670 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4))
1671 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4));
1672 }
1673 }
1674 else
1675 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
1676
1677 /* CR4 writes are always intercepted (both guest, nested-guest) for tracking
1678 PGM mode changes and AVX (for XCR0 syncing during world switching). */
1679 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
1680
1681 /* Update the VMCB with the shadow CR4 and clear the appropriate VMCB clean bits. */
1682 Assert(!RT_HI_U32(uShadowCr4));
1683 pVmcb->guest.u64CR4 = uShadowCr4;
1684 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
1685
1686 return VINF_SUCCESS;
1687}
1688
1689
1690/**
1691 * Exports the guest (or nested-guest) control registers into the VMCB.
1692 *
1693 * @returns VBox status code.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 * @param pVmcb Pointer to the VM control block.
1696 *
1697 * @remarks No-long-jump zone!!!
1698 */
1699static int hmR0SvmExportGuestControlRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1700{
1701 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1702
1703 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK)
1704 {
1705 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0)
1706 hmR0SvmExportGuestCR0(pVCpu, pVmcb);
1707
1708 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2)
1709 {
1710 pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2;
1711 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1712 }
1713
1714 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3)
1715 hmR0SvmExportGuestCR3(pVCpu, pVmcb);
1716
1717 /* CR4 re-loading is ASSUMED to be done every time we get in from ring-3! (XCR0) */
1718 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4)
1719 {
1720 int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb);
1721 if (RT_FAILURE(rc))
1722 return rc;
1723 }
1724
1725 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK;
1726 }
1727 return VINF_SUCCESS;
1728}
1729
1730
1731/**
1732 * Exports the guest (or nested-guest) segment registers into the VMCB.
1733 *
1734 * @param pVCpu The cross context virtual CPU structure.
1735 * @param pVmcb Pointer to the VM control block.
1736 *
1737 * @remarks No-long-jump zone!!!
1738 */
1739static void hmR0SvmExportGuestSegmentRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1740{
1741 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1742 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1743
1744 /* Guest segment registers. */
1745 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK)
1746 {
1747 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS)
1748 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);
1749
1750 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS)
1751 {
1752 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
1753 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1754 }
1755
1756 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS)
1757 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);
1758
1759 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES)
1760 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);
1761
1762 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS)
1763 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
1764
1765 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS)
1766 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
1767
1768 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1769 }
1770
1771 /* Guest TR. */
1772 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR)
1773 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
1774
1775 /* Guest LDTR. */
1776 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR)
1777 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
1778
1779 /* Guest GDTR. */
1780 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR)
1781 {
1782 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1783 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1784 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1785 }
1786
1787 /* Guest IDTR. */
1788 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR)
1789 {
1790 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1791 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1792 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1793 }
1794
1795 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SREG_MASK
1796 | HM_CHANGED_GUEST_TABLE_MASK);
1797}
1798
1799
1800/**
1801 * Exports the guest (or nested-guest) MSRs into the VMCB.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param pVmcb Pointer to the VM control block.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void hmR0SvmExportGuestMsrs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1809{
1810 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1811 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1812
1813 /* Guest Sysenter MSRs. */
1814 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
1815 {
1816 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
1817 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1818
1819 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
1820 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1821
1822 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
1823 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1824 }
1825
1826 /*
1827 * Guest EFER MSR.
1828 * AMD-V requires guest EFER.SVME to be set. Weird.
1829 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1830 */
1831 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR)
1832 {
1833 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1834 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1835 }
1836
1837 /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit, otherwise SVM expects amd64 shadow paging. */
1838 if ( !CPUMIsGuestInLongModeEx(pCtx)
1839 && (pCtx->msrEFER & MSR_K6_EFER_LME))
1840 {
1841 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1842 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1843 }
1844
1845 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS)
1846 {
1847 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1848 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1849 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1850 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1851 }
1852
1853 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE)
1854 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1855
1856 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SYSENTER_MSR_MASK
1857 | HM_CHANGED_GUEST_EFER_MSR
1858 | HM_CHANGED_GUEST_SYSCALL_MSRS
1859 | HM_CHANGED_GUEST_KERNEL_GS_BASE);
1860
1861 /*
1862 * Setup the PAT MSR (applicable for Nested Paging only).
1863 *
1864 * The default value should be MSR_IA32_CR_PAT_INIT_VAL, but we treat all guest memory
1865 * as WB, so choose type 6 for all PAT slots, see @bugref{9634}.
1866 *
1867 * While guests can modify and see the modified values through the shadow values,
1868 * we shall not honor any guest modifications of this MSR to ensure caching is always
1869 * enabled similar to how we clear CR0.CD and NW bits.
1870 *
1871 * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
1872 */
1873 pVmcb->guest.u64PAT = UINT64_C(0x0006060606060606);
1874
1875 /* Enable the last branch record bit if LBR virtualization is enabled. */
1876 if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
1877 pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
1878}
1879
1880
1881/**
1882 * Exports the guest (or nested-guest) debug state into the VMCB and programs
1883 * the necessary intercepts accordingly.
1884 *
1885 * @param pVCpu The cross context virtual CPU structure.
1886 * @param pVmcb Pointer to the VM control block.
1887 *
1888 * @remarks No-long-jump zone!!!
1889 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1890 */
1891static void hmR0SvmExportSharedDebugState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1892{
1893 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1894
1895 /** @todo Figure out stepping with nested-guest. */
1896 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1897 {
1898 /*
1899 * We don't want to always intercept DRx read/writes for nested-guests as it causes
1900 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
1901 * Instead, they are strictly only requested when the nested hypervisor intercepts
1902 * them -- handled while merging VMCB controls.
1903 *
1904 * If neither the outer nor the nested-hypervisor is intercepting DRx read/writes,
1905 * then the nested-guest debug state should be actively loaded on the host so that
1906 * nested-guest reads/writes its own debug registers without causing VM-exits.
1907 */
1908 if ( ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1909 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1910 && !CPUMIsGuestDebugStateActive(pVCpu))
1911 {
1912 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
1913 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1914 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1915 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1916 }
1917
1918 pVmcb->guest.u64DR6 = pCtx->dr[6];
1919 pVmcb->guest.u64DR7 = pCtx->dr[7];
1920 return;
1921 }
1922
1923 /*
1924 * Anyone single stepping on the host side? If so, we'll have to use the
1925 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1926 * the VMM level like the VT-x implementation does.
1927 */
1928 bool fInterceptMovDRx = false;
1929 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1930 if (fStepping)
1931 {
1932 pVCpu->hmr0.s.fClearTrapFlag = true;
1933 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1934 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1935 }
1936
1937 if ( fStepping
1938 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1939 {
1940 /*
1941 * Use the combined guest and host DRx values found in the hypervisor
1942 * register set because the debugger has breakpoints active or someone
1943 * is single stepping on the host side.
1944 *
1945 * Note! DBGF expects a clean DR6 state before executing guest code.
1946 */
1947 if (!CPUMIsHyperDebugStateActive(pVCpu))
1948 {
1949 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1950 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1951 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1952 }
1953
1954 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1955 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1956 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1957 {
1958 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1959 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1960 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1961 }
1962
1963 /** @todo If we cared, we could optimize to allow the guest to read registers
1964 * with the same values. */
1965 fInterceptMovDRx = true;
1966 pVCpu->hmr0.s.fUsingHyperDR7 = true;
1967 Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
1968 }
1969 else
1970 {
1971 /*
1972 * Update DR6, DR7 with the guest values if necessary.
1973 */
1974 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1975 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1976 {
1977 pVmcb->guest.u64DR7 = pCtx->dr[7];
1978 pVmcb->guest.u64DR6 = pCtx->dr[6];
1979 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1980 }
1981 pVCpu->hmr0.s.fUsingHyperDR7 = false;
1982
1983 /*
1984 * If the guest has enabled debug registers, we need to load them prior to
1985 * executing guest code so they'll trigger at the right time.
1986 */
1987 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1988 {
1989 if (!CPUMIsGuestDebugStateActive(pVCpu))
1990 {
1991 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1992 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1993 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1994 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1995 }
1996 Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n"));
1997 }
1998 /*
1999 * If no debugging is enabled, we'll lazily load DR0-3. We don't need to
2000 * intercept #DB as DR6 is updated in the VMCB.
2001 *
2002 * Note! If we cared and dared, we could skip intercepting \#DB here.
2003 * However, \#DB shouldn't be performance critical, so we'll play safe
2004 * and keep the code similar to the VT-x code and always intercept it.
2005 */
2006 else if (!CPUMIsGuestDebugStateActive(pVCpu))
2007 fInterceptMovDRx = true;
2008 }
2009
2010 Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
2011 if (fInterceptMovDRx)
2012 {
2013 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
2014 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
2015 {
2016 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
2017 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
2018 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2019 }
2020 }
2021 else
2022 {
2023 if ( pVmcb->ctrl.u16InterceptRdDRx
2024 || pVmcb->ctrl.u16InterceptWrDRx)
2025 {
2026 pVmcb->ctrl.u16InterceptRdDRx = 0;
2027 pVmcb->ctrl.u16InterceptWrDRx = 0;
2028 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2029 }
2030 }
2031 Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
2032}
2033
2034/**
2035 * Exports the hardware virtualization state into the nested-guest
2036 * VMCB.
2037 *
2038 * @param pVCpu The cross context virtual CPU structure.
2039 * @param pVmcb Pointer to the VM control block.
2040 *
2041 * @remarks No-long-jump zone!!!
2042 */
2043static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2044{
2045 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2046
2047 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
2048 {
2049 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
2050 {
2051 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2052 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2053
2054 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); /* Nested VGIF is not supported yet. */
2055 Assert(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF); /* Physical hardware supports VGIF. */
2056 Assert(HMIsSvmVGifActive(pVM)); /* Outer VM has enabled VGIF. */
2057 NOREF(pVM);
2058
2059 pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
2060 }
2061
2062 /*
2063 * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
2064 * since SVM doesn't have a preemption timer.
2065 *
2066 * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
2067 * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
2068 * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
2069 */
2070 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2071 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2072 uint16_t const uGuestPauseFilterCount = pVM->hm.s.svm.cPauseFilter;
2073 uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
2074 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
2075 {
2076 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2077 pVmcbCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
2078 pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
2079 }
2080 else
2081 {
2082 /** @todo r=ramshankar: We can turn these assignments into assertions. */
2083 pVmcbCtrl->u16PauseFilterCount = uGuestPauseFilterCount;
2084 pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
2085 }
2086 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2087
2088 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
2089 }
2090}
2091
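/*
 * Worked example for the pause-filter clamping above (illustrative numbers only): if the outer
 * VM is configured with a pause-filter count of 3000 and the nested-guest programs 5000, the
 * merged VMCB gets RT_MIN(5000, 3000) = 3000, so the nested-guest can never exceed the limit
 * configured for the outer guest.
 */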
2092
2093/**
2094 * Exports the guest APIC TPR state into the VMCB.
2095 *
2096 * @returns VBox status code.
2097 * @param pVCpu The cross context virtual CPU structure.
2098 * @param pVmcb Pointer to the VM control block.
2099 */
2100static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2101{
2102 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2103
2104 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
2105 {
2106 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2107 if ( PDMHasApic(pVM)
2108 && APICIsEnabled(pVCpu))
2109 {
2110 bool fPendingIntr;
2111 uint8_t u8Tpr;
2112 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
2113 AssertRCReturn(rc, rc);
2114
2115 /* Assume that we need to trap all TPR accesses and thus need not check on
2116 every #VMEXIT if we should update the TPR. */
2117 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
2118 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2119
2120 if (!pVM->hm.s.fTprPatchingActive)
2121 {
2122 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2123 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
2124
2125 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we
2126 can deliver the interrupt to the guest. */
2127 if (fPendingIntr)
2128 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
2129 else
2130 {
2131 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
2132 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2133 }
2134
2135 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
2136 }
2137 else
2138 {
2139 /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */
2140 pVmcb->guest.u64LSTAR = u8Tpr;
2141 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2142
2143 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
2144 if (fPendingIntr)
2145 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
2146 else
2147 {
2148 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
2149 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2150 }
2151 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
2152 }
2153 }
2154 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
2155 }
2156 return VINF_SUCCESS;
2157}
2158
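/*
 * Worked example for the TPR-to-VTPR mapping above (illustrative only): a guest TPR of 0xB0 has
 * task-priority class 0xB, so the 4-bit VTPR field is written as 0xB0 >> 4 = 0xB. The helper
 * below is hypothetical and kept out of the build.
 */
#if 0
static uint8_t hmR0SvmExampleTprToVTpr(uint8_t u8Tpr)
{
    /* Bits 7-4 of the TPR (the task-priority class) map to bits 3-0 of the VTPR field. */
    return u8Tpr >> 4;
}
#endif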
2159
2160/**
2161 * Sets up the exception intercepts required for guest execution in the VMCB.
2162 *
2163 * @param pVCpu The cross context virtual CPU structure.
2164 * @param pVmcb Pointer to the VM control block.
2165 *
2166 * @remarks No-long-jump zone!!!
2167 */
2168static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2169{
2170 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2171
2172 /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
2173 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
2174 {
2175 /* Trap #UD for GIM provider (e.g. for hypercalls). */
2176 if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
2177 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
2178 else
2179 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD);
2180
2181 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
2182 if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
2183 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP);
2184 else
2185 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP);
2186
2187 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
2188 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
2189 }
2190}
2191
2192
2193#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2194/**
2195 * Merges guest and nested-guest intercepts for executing the nested-guest using
2196 * hardware-assisted SVM.
2197 *
2198 * This merges the guest and nested-guest intercepts in a way that if the outer
2199 * guest intercept is set we need to intercept it in the nested-guest as
2200 * well.
2201 *
2202 * @param pVCpu The cross context virtual CPU structure.
2204 */
2205static void hmR0SvmMergeVmcbCtrlsNested(PVMCPUCC pVCpu)
2206{
2207 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2208 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
2209 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2210 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2211
2212 /* Merge the guest's CR intercepts into the nested-guest VMCB. */
2213 pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
2214 pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
2215
2216 /* Always intercept CR4 writes for tracking PGM mode changes and AVX (for
2217 XCR0 syncing during worlds switching). */
2218 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
2219
2220 /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
2221 if (!pVM->hmr0.s.fNestedPaging)
2222 {
2223 pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
2224 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
2225 }
2226
2227 /* Merge the guest's DR intercepts into the nested-guest VMCB. */
2228 pVmcbNstGstCtrl->u16InterceptRdDRx |= pVmcb->ctrl.u16InterceptRdDRx;
2229 pVmcbNstGstCtrl->u16InterceptWrDRx |= pVmcb->ctrl.u16InterceptWrDRx;
2230
2231 /*
2232 * Merge the guest's exception intercepts into the nested-guest VMCB.
2233 *
2234 * - #UD: Exclude these as the outer guest's GIM hypercalls are not applicable
2235 * while executing the nested-guest.
2236 *
2237 * - #BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
2238 * be tweaked later depending on how we wish to implement breakpoints.
2239 *
2240 * - #GP: Exclude these as it's the inner VMM's problem to get vmsvga 3d drivers
2241 * loaded into their guests, not ours.
2242 *
2243 * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
2244 * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts().
2245 */
2246#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
2247 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt
2248 & ~( RT_BIT(X86_XCPT_UD)
2249 | RT_BIT(X86_XCPT_BP)
2250 | (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv ? RT_BIT(X86_XCPT_GP) : 0));
2251#else
2252 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
2253#endif
2254
2255 /*
2256 * Adjust intercepts while executing the nested-guest that differ from the
2257 * outer guest intercepts.
2258 *
2259 * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
2260 * that belong to the nested-guest to the outer guest.
2261 *
2262 * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
2263 * the nested-guest, the physical CPU raises a \#UD exception as expected.
2264 */
2265 pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~( SVM_CTRL_INTERCEPT_VINTR
2266 | SVM_CTRL_INTERCEPT_VMMCALL))
2267 | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
2268
2269 Assert( (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
2270 == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
2271
2272 /* Finally, update the VMCB clean bits. */
2273 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2274}
2275#endif
2276
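/*
 * Minimal sketch (not compiled) of the baseline merge rule used by hmR0SvmMergeVmcbCtrlsNested()
 * above: an intercept set in either the outer-guest VMCB or the nested-guest VMCB must be set in
 * the merged controls (a few bits such as VINTR, VMMCALL, #UD and #BP are then excluded, as noted
 * in that function). The CR-read bitmap, which is OR-ed directly, is used as the example field;
 * the helper name is hypothetical.
 */
#if 0
static uint16_t hmR0SvmExampleMergeRdCRx(uint16_t u16GstInterceptRdCRx, uint16_t u16NstGstInterceptRdCRx)
{
    /* OR-ing ensures outer-guest intercepts are never dropped while running the nested-guest. */
    return u16GstInterceptRdCRx | u16NstGstInterceptRdCRx;
}
#endif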
2277
2278/**
2279 * Enters the AMD-V session.
2280 *
2281 * @returns VBox status code.
2282 * @param pVCpu The cross context virtual CPU structure.
2283 */
2284VMMR0DECL(int) SVMR0Enter(PVMCPUCC pVCpu)
2285{
2286 AssertPtr(pVCpu);
2287 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
2288 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2289
2290 LogFlowFunc(("pVCpu=%p\n", pVCpu));
2291 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2292 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2293
2294 pVCpu->hmr0.s.fLeaveDone = false;
2295 return VINF_SUCCESS;
2296}
2297
2298
2299/**
2300 * Thread-context callback for AMD-V.
2301 *
2302 * This is used together with RTThreadCtxHookCreate() on platforms which
2303 * support it, and directly from VMMR0EmtPrepareForBlocking() and
2304 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
2305 *
2306 * @param enmEvent The thread-context event.
2307 * @param pVCpu The cross context virtual CPU structure.
2308 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
2309 * @thread EMT(pVCpu)
2310 */
2311VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
2312{
2313 NOREF(fGlobalInit);
2314
2315 switch (enmEvent)
2316 {
2317 case RTTHREADCTXEVENT_OUT:
2318 {
2319 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2320 VMCPU_ASSERT_EMT(pVCpu);
2321
2322 /* No longjmps (log-flush, locks) in this fragile context. */
2323 VMMRZCallRing3Disable(pVCpu);
2324
2325 if (!pVCpu->hmr0.s.fLeaveDone)
2326 {
2327 hmR0SvmLeave(pVCpu, false /* fImportState */);
2328 pVCpu->hmr0.s.fLeaveDone = true;
2329 }
2330
2331 /* Leave HM context, takes care of local init (term). */
2332 int rc = HMR0LeaveCpu(pVCpu);
2333 AssertRC(rc); NOREF(rc);
2334
2335 /* Restore longjmp state. */
2336 VMMRZCallRing3Enable(pVCpu);
2337 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
2338 break;
2339 }
2340
2341 case RTTHREADCTXEVENT_IN:
2342 {
2343 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2344 VMCPU_ASSERT_EMT(pVCpu);
2345
2346 /* No longjmps (log-flush, locks) in this fragile context. */
2347 VMMRZCallRing3Disable(pVCpu);
2348
2349 /*
2350 * Initialize the bare minimum state required for HM. This takes care of
2351 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
2352 */
2353 int rc = hmR0EnterCpu(pVCpu);
2354 AssertRC(rc); NOREF(rc);
2355 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2356 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2357
2358 pVCpu->hmr0.s.fLeaveDone = false;
2359
2360 /* Restore longjmp state. */
2361 VMMRZCallRing3Enable(pVCpu);
2362 break;
2363 }
2364
2365 default:
2366 break;
2367 }
2368}
2369
2370
2371/**
2372 * Saves the host state.
2373 *
2374 * @returns VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure.
2376 *
2377 * @remarks No-long-jump zone!!!
2378 */
2379VMMR0DECL(int) SVMR0ExportHostState(PVMCPUCC pVCpu)
2380{
2381 NOREF(pVCpu);
2382
2383 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
2384 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT);
2385 return VINF_SUCCESS;
2386}
2387
2388
2389/**
2390 * Exports the guest or nested-guest state from the virtual-CPU context into the
2391 * VMCB.
2392 *
2393 * Also sets up the appropriate VMRUN function to execute guest or nested-guest
2394 * code based on the virtual-CPU mode.
2395 *
2396 * @returns VBox status code.
2397 * @param pVCpu The cross context virtual CPU structure.
2398 * @param pSvmTransient Pointer to the SVM-transient structure.
2399 *
2400 * @remarks No-long-jump zone!!!
2401 */
2402static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
2403{
2404 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
2405
2406 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2407 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2408 Assert(pVmcb);
2409
2410 pVmcb->guest.u64RIP = pCtx->rip;
2411 pVmcb->guest.u64RSP = pCtx->rsp;
2412 pVmcb->guest.u64RFlags = pCtx->eflags.u;
2413 pVmcb->guest.u64RAX = pCtx->rax;
2414
2415 bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
2416 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2417
2418 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
2419 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
2420 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
2421 hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
2422 hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
2423
2424 ASMSetFlags(fEFlags);
2425
2426 if (!fIsNestedGuest)
2427 {
2428 /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as
2429 otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
2430 hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
2431 hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
2432 }
2433
2434 /* Clear any bits that may be set but are exported unconditionally, as well as unused/reserved bits. */
2435 uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
2436 | HM_CHANGED_GUEST_RFLAGS
2437 | HM_CHANGED_GUEST_GPRS_MASK
2438 | HM_CHANGED_GUEST_X87
2439 | HM_CHANGED_GUEST_SSE_AVX
2440 | HM_CHANGED_GUEST_OTHER_XSAVE
2441 | HM_CHANGED_GUEST_XCRx
2442 | HM_CHANGED_GUEST_TSC_AUX
2443 | HM_CHANGED_GUEST_OTHER_MSRS;
2444 if (fIsNestedGuest)
2445 fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
2446 | HM_CHANGED_GUEST_APIC_TPR;
2447
2448 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( fUnusedMask
2449 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
2450
2451#ifdef VBOX_STRICT
2452 /*
2453 * All of the guest-CPU state and SVM keeper bits should be exported here by now,
2454 * except for the host-context and/or shared host-guest context bits.
2455 */
2456 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
2457 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
2458 ("fCtxChanged=%#RX64\n", fCtxChanged));
2459
2460 /*
2461 * If we need to log state that isn't always imported, we'll need to import it here.
2462 * See hmR0SvmPostRunGuest() for which part of the state is imported unconditionally.
2463 */
2464 hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */);
2465#endif
2466
2467 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
2468 return VINF_SUCCESS;
2469}
2470
2471#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2472
2473/**
2474 * Merges the guest and nested-guest MSR permission bitmaps.
2475 *
2476 * If the guest is intercepting an MSR we need to intercept it regardless of
2477 * whether the nested-guest is intercepting it or not.
2478 *
2479 * @param pHostCpu The HM physical-CPU structure.
2480 * @param pVCpu The cross context virtual CPU structure.
2481 *
2482 * @remarks No-long-jmp zone!!!
2483 */
2484DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
2485{
2486 uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2487 uint64_t const *pu64NstGstMsrpm = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[0];
2488 uint64_t *pu64DstMsrpm = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
2489
2490 /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
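    /* Note: 0x1800 bytes correspond to 0x300 quadwords, i.e. the three 2 KB MSR ranges the MSRPM
       actually defines; the reserved remainder of the bitmap is left untouched. */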
2491 uint32_t const offRsvdQwords = 0x1800 >> 3;
2492 for (uint32_t i = 0; i < offRsvdQwords; i++)
2493 pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
2494}
2495
2496
2497/**
2498 * Caches the nested-guest VMCB fields before we modify them for execution using
2499 * hardware-assisted SVM.
2500 *
2501 * @returns true if the VMCB was previously already cached, false otherwise.
2502 * @param pVCpu The cross context virtual CPU structure.
2503 *
2504 * @sa HMNotifySvmNstGstVmexit.
2505 */
2506static bool hmR0SvmCacheVmcbNested(PVMCPUCC pVCpu)
2507{
2508 /*
2509 * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
2510 * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
2511 *
2512 * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
2513 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
2514 */
2515 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2516 bool const fWasCached = pVmcbNstGstCache->fCacheValid;
2517 if (!fWasCached)
2518 {
2519 PCSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2520 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2521 pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
2522 pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
2523 pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
2524 pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
2525 pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
2526 pVmcbNstGstCache->u16PauseFilterCount = pVmcbNstGstCtrl->u16PauseFilterCount;
2527 pVmcbNstGstCache->u32InterceptXcpt = pVmcbNstGstCtrl->u32InterceptXcpt;
2528 pVmcbNstGstCache->u64InterceptCtrl = pVmcbNstGstCtrl->u64InterceptCtrl;
2529 pVmcbNstGstCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
2530 pVmcbNstGstCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
2531 pVmcbNstGstCache->fNestedPaging = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
2532 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
2533 pVmcbNstGstCache->fCacheValid = true;
2534 Log4Func(("Cached VMCB fields\n"));
2535 }
2536
2537 return fWasCached;
2538}
2539
2540
2541/**
2542 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
2543 *
2544 * This is done the first time we enter nested-guest execution using SVM R0
2545 * until the nested-guest \#VMEXIT (not to be confused with physical CPU
2546 * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
2547 *
2548 * @param pVCpu The cross context virtual CPU structure.
2549 */
2550static void hmR0SvmSetupVmcbNested(PVMCPUCC pVCpu)
2551{
2552 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
2553 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2554
2555 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2556
2557 /*
2558 * First cache the nested-guest VMCB fields we may potentially modify.
2559 */
2560 bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
2561 if (!fVmcbCached)
2562 {
2563 /*
2564 * The IOPM of the nested-guest can be ignored because the guest always
2565 * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
2566 * than the nested-guest IOPM and swap the field back on the #VMEXIT.
2567 */
2568 pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
2569
2570 /*
2571 * Use the same nested-paging as the outer guest. We can't dynamically switch off
2572 * nested-paging suddenly while executing a VM (see assertion at the end of
2573 * Trap0eHandler() in PGMAllBth.h).
2574 */
2575 pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
2576
2577 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
2578 pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;
2579
2580 /*
2581 * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject
2582 * to the nested-guest intercepts and we always run with V_INTR_MASKING.
2583 */
2584 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2585
2586# ifdef DEBUG_ramshankar
2587 /* For debugging purposes - copy the LBR info from the outer guest VMCB. */
2588 pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVCpu->hmr0.s.svm.pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
2589# endif
2590
2591 /*
2592 * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
2593 * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
2594 */
2595 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
2596 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
2597 | SVM_CTRL_INTERCEPT_VMLOAD;
2598
2599 /*
2600 * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
2601 * CLGI/STGI instructions executed by the nested-guest.
2602 */
2603 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
2604 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
2605 | SVM_CTRL_INTERCEPT_STGI;
2606
2607 /* Merge the guest and nested-guest intercepts. */
2608 hmR0SvmMergeVmcbCtrlsNested(pVCpu);
2609
2610 /* Update the VMCB clean bits. */
2611 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2612 }
2613 else
2614 {
2615 Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
2616 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
2617 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2618 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPagingCfg == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2619 }
2620}
2621
2622#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
2623
2624/**
2625 * Exports the state shared between the host and guest (or nested-guest) into
2626 * the VMCB.
2627 *
2628 * @param pVCpu The cross context virtual CPU structure.
2629 * @param pVmcb Pointer to the VM control block.
2630 *
2631 * @remarks No-long-jump zone!!!
2632 */
2633static void hmR0SvmExportSharedState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2634{
2635 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2636 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2637
2638 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
2639 hmR0SvmExportSharedDebugState(pVCpu, pVmcb);
2640
2641 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
2642 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE),
2643 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
2644}
2645
2646
2647/**
2648 * Worker for SVMR0ImportStateOnDemand.
2649 *
2650 * @param pVCpu The cross context virtual CPU structure.
2651 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2652 */
2653static void hmR0SvmImportGuestState(PVMCPUCC pVCpu, uint64_t fWhat)
2654{
2655 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
2656
2657 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2658 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2659 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
2660 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2661
2662 /*
2663 * We disable interrupts to make the updating of the state and in particular
2664 * the fExtrn modification atomic wrt preemption hooks.
2665 */
2666 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2667
2668 fWhat &= pCtx->fExtrn;
2669 if (fWhat)
2670 {
2671#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2672 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
2673 {
2674 if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
2675 {
2676 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); /* We don't yet support passing VGIF feature to the guest. */
2677 Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM))); /* VM has configured it. */
2678 CPUMSetGuestGif(pCtx, pVmcbCtrl->IntCtrl.n.u1VGif);
2679 }
2680 }
2681
2682 if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
2683 {
2684 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending
2685 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
2686 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2687 }
2688#endif
2689
2690 if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
2691 CPUMUpdateInterruptShadowEx(pCtx, pVmcbCtrl->IntShadow.n.u1IntShadow, pVmcbGuest->u64RIP);
2692
2693 if (fWhat & CPUMCTX_EXTRN_RIP)
2694 pCtx->rip = pVmcbGuest->u64RIP;
2695
2696 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2697 {
2698 pCtx->eflags.u = pVmcbGuest->u64RFlags;
2699 if (pVCpu->hmr0.s.fClearTrapFlag)
2700 {
2701 pVCpu->hmr0.s.fClearTrapFlag = false;
2702 pCtx->eflags.Bits.u1TF = 0;
2703 }
2704 }
2705
2706 if (fWhat & CPUMCTX_EXTRN_RSP)
2707 pCtx->rsp = pVmcbGuest->u64RSP;
2708
2709 if (fWhat & CPUMCTX_EXTRN_RAX)
2710 pCtx->rax = pVmcbGuest->u64RAX;
2711
2712 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2713 {
2714 if (fWhat & CPUMCTX_EXTRN_CS)
2715 {
2716 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
2717 /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */
2718 /** @todo SELM might need to be fixed as it too should not care about the
2719 * granularity bit. See @bugref{6785}. */
2720 if ( !pCtx->cs.Attr.n.u1Granularity
2721 && pCtx->cs.Attr.n.u1Present
2722 && pCtx->cs.u32Limit > UINT32_C(0xfffff))
2723 {
2724 Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
2725 pCtx->cs.Attr.n.u1Granularity = 1;
2726 }
2727 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
2728 }
2729 if (fWhat & CPUMCTX_EXTRN_SS)
2730 {
2731 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
2732 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
2733 /*
2734 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the
2735 * VMCB and use that, and thus it's possible that when the CPL changes during
2736 * guest execution that the SS DPL isn't updated by AMD-V. Observed on some
2737 * AMD Fusion CPUs with 64-bit guests.
2738 *
2739 * See AMD spec. 15.5.1 "Basic operation".
2740 */
2741 Assert(!(pVmcbGuest->u8CPL & ~0x3));
2742 uint8_t const uCpl = pVmcbGuest->u8CPL;
2743 if (pCtx->ss.Attr.n.u2Dpl != uCpl)
2744 pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
2745 }
2746 if (fWhat & CPUMCTX_EXTRN_DS)
2747 {
2748 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
2749 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
2750 }
2751 if (fWhat & CPUMCTX_EXTRN_ES)
2752 {
2753 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
2754 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
2755 }
2756 if (fWhat & CPUMCTX_EXTRN_FS)
2757 {
2758 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
2759 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
2760 }
2761 if (fWhat & CPUMCTX_EXTRN_GS)
2762 {
2763 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
2764 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
2765 }
2766 }
2767
2768 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2769 {
2770 if (fWhat & CPUMCTX_EXTRN_TR)
2771 {
2772 /*
2773 * Fix up the TR attributes so they're compatible with Intel. Important when saved-states
2774 * are used between Intel and AMD, see @bugref{6208#c39}.
2775 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
2776 */
2777 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
2778 if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2779 {
2780 if ( pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2781 || CPUMIsGuestInLongModeEx(pCtx))
2782 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2783 else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
2784 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2785 }
2786 }
2787
2788 if (fWhat & CPUMCTX_EXTRN_LDTR)
2789 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
2790
2791 if (fWhat & CPUMCTX_EXTRN_GDTR)
2792 {
2793 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
2794 pCtx->gdtr.pGdt = pVmcbGuest->GDTR.u64Base;
2795 }
2796
2797 if (fWhat & CPUMCTX_EXTRN_IDTR)
2798 {
2799 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
2800 pCtx->idtr.pIdt = pVmcbGuest->IDTR.u64Base;
2801 }
2802 }
2803
2804 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2805 {
2806 pCtx->msrSTAR = pVmcbGuest->u64STAR;
2807 pCtx->msrLSTAR = pVmcbGuest->u64LSTAR;
2808 pCtx->msrCSTAR = pVmcbGuest->u64CSTAR;
2809 pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
2810 }
2811
2812 if ( (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2813 && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
2814 {
2815 pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS;
2816 pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
2817 pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
2818 }
2819
2820 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2821 pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
2822
2823 if (fWhat & CPUMCTX_EXTRN_DR_MASK)
2824 {
2825 if (fWhat & CPUMCTX_EXTRN_DR6)
2826 {
2827 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2828 pCtx->dr[6] = pVmcbGuest->u64DR6;
2829 else
2830 CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
2831 }
2832
2833 if (fWhat & CPUMCTX_EXTRN_DR7)
2834 {
2835 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2836 pCtx->dr[7] = pVmcbGuest->u64DR7;
2837 else
2838 Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
2839 }
2840 }
2841
2842 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2843 {
2844 if (fWhat & CPUMCTX_EXTRN_CR0)
2845 {
2846 /* We intercept changes to all CR0 bits except maybe TS & MP bits. */
2847 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
2848 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP));
2849 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
2850 CPUMSetGuestCR0(pVCpu, uCr0);
2851 VMMRZCallRing3Enable(pVCpu);
2852 }
2853
2854 if (fWhat & CPUMCTX_EXTRN_CR2)
2855 pCtx->cr2 = pVmcbGuest->u64CR2;
2856
2857 if (fWhat & CPUMCTX_EXTRN_CR3)
2858 {
2859 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
2860 && pCtx->cr3 != pVmcbGuest->u64CR3)
2861 {
2862 CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
2863 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2864 }
2865 }
2866
2867 /* Changes to CR4 are always intercepted. */
2868 }
2869
2870 /* Update fExtrn. */
2871 pCtx->fExtrn &= ~fWhat;
2872
2873 /* If everything has been imported, clear the HM keeper bit. */
2874 if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
2875 {
2876 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
2877 Assert(!pCtx->fExtrn);
2878 }
2879 }
2880 else
2881 Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
2882
2883 ASMSetFlags(fEFlags);
2884
2885 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
2886
2887 /*
2888 * Honor any pending CR3 updates.
2889 *
2890 * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
2891 * -> SVMR0CallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
2892 * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
2893 * handling -> hmR0SvmImportGuestState() and here we are.
2894 *
2895 * The reason for such complicated handling is that VM-exits that call into PGM expect
2896 * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've
2897 * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0
2898 * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3().
2899 *
2900 * Since the longjmp exit path can't check these CR3 force-flags and call code that takes a lock again,
2901 * and does not process force-flags like regular exits to ring-3 either, we cover for it here.
2902 */
2903 if ( VMMRZCallRing3IsEnabled(pVCpu)
2904 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
2905 {
2906 AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
2907 PGMUpdateCR3(pVCpu, pCtx->cr3);
2908 }
2909}
2910
2911
2912/**
2913 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
2914 * context.
2915 *
2916 * Currently there is no residual state left in the CPU that is not updated in the
2917 * VMCB.
2918 *
2919 * @returns VBox status code.
2920 * @param pVCpu The cross context virtual CPU structure.
2921 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2922 */
2923VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2924{
2925 hmR0SvmImportGuestState(pVCpu, fWhat);
2926 return VINF_SUCCESS;
2927}
2928
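/*
 * Illustrative sketch only (not compiled) of on-demand state import: a hypothetical #VMEXIT
 * handler that only consumes RIP and RFLAGS imports just those bits rather than
 * HMSVM_CPUMCTX_EXTRN_ALL, leaving the rest of the guest state in the VMCB.
 */
#if 0
static void hmR0SvmExampleImportMinimalState(PVMCPUCC pVCpu)
{
    /* Only the flags requested here are copied from the VMCB into pVCpu->cpum.GstCtx. */
    hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
}
#endif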
2929
2930/**
2931 * Gets SVM \#VMEXIT auxiliary information.
2932 *
2933 * @returns VBox status code.
2934 * @param pVCpu The cross context virtual CPU structure.
2935 * @param pSvmExitAux Where to store the auxiliary info.
2936 */
2937VMMR0DECL(int) SVMR0GetExitAuxInfo(PVMCPUCC pVCpu, PSVMEXITAUX pSvmExitAux)
2938{
2939 PCSVMTRANSIENT pSvmTransient = pVCpu->hmr0.s.svm.pSvmTransient;
2940 if (RT_LIKELY(pSvmTransient))
2941 {
2942 PCSVMVMCB pVmcb = pSvmTransient->pVmcb;
2943 if (RT_LIKELY(pVmcb))
2944 {
2945 pSvmExitAux->u64ExitCode = pVmcb->ctrl.u64ExitCode;
2946 pSvmExitAux->u64ExitInfo1 = pVmcb->ctrl.u64ExitInfo1;
2947 pSvmExitAux->u64ExitInfo2 = pVmcb->ctrl.u64ExitInfo2;
2948 pSvmExitAux->ExitIntInfo = pVmcb->ctrl.ExitIntInfo;
2949 return VINF_SUCCESS;
2950 }
2951 return VERR_SVM_IPE_5;
2952 }
2953 return VERR_NOT_AVAILABLE;
2954}
2955
2956
2957/**
2958 * Does the necessary state syncing before returning to ring-3 for any reason
2959 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2960 *
2961 * @param pVCpu The cross context virtual CPU structure.
2962 * @param fImportState Whether to import the guest state from the VMCB back
2963 * to the guest-CPU context.
2964 *
2965 * @remarks No-long-jmp zone!!!
2966 */
2967static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState)
2968{
2969 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2970 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2971
2972 /*
2973 * !!! IMPORTANT !!!
2974 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
2975 */
2976
2977 /* Save the guest state if necessary. */
2978 if (fImportState)
2979 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
2980
2981 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2982 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
2983 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2984
2985 /*
2986 * Restore host debug registers if necessary and resync on next R0 reentry.
2987 */
2988#ifdef VBOX_STRICT
2989 if (CPUMIsHyperDebugStateActive(pVCpu))
2990 {
2991 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb; /** @todo nested-guest. */
2992 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2993 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2994 }
2995#endif
2996 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2997 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2998 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2999
3000 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
3001 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
3002 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
3003 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
3004 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
3005 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
3006 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3007
3008 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
3009}
3010
3011
3012/**
3013 * Leaves the AMD-V session.
3014 *
3015 * Only used while returning to ring-3, either due to a longjmp or a voluntary
3016 * exit to ring-3.
3017 *
3018 * @returns VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure.
3020 */
3021static int hmR0SvmLeaveSession(PVMCPUCC pVCpu)
3022{
3023 HM_DISABLE_PREEMPT(pVCpu);
3024 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3025 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3026
3027 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted before
3028 and already did this from the SVMR0ThreadCtxCallback(). */
3029 if (!pVCpu->hmr0.s.fLeaveDone)
3030 {
3031 hmR0SvmLeave(pVCpu, true /* fImportState */);
3032 pVCpu->hmr0.s.fLeaveDone = true;
3033 }
3034
3035 /*
3036 * !!! IMPORTANT !!!
3037 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
3038 */
3039
3040 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3041 /* Deregister hook now that we've left HM context before re-enabling preemption. */
3042 VMMR0ThreadCtxHookDisable(pVCpu);
3043
3044 /* Leave HM context. This takes care of local init (term). */
3045 int rc = HMR0LeaveCpu(pVCpu);
3046
3047 HM_RESTORE_PREEMPT();
3048 return rc;
3049}
3050
3051
3052/**
3053 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
3054 * any remaining host state) before we go back to ring-3 due to an assertion.
3055 *
3056 * @param pVCpu The cross context virtual CPU structure.
3057 */
3058VMMR0DECL(int) SVMR0AssertionCallback(PVMCPUCC pVCpu)
3059{
3060 /*
3061 * !!! IMPORTANT !!!
3062 * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
3063 * to be updated too. This is a stripped-down version which gets out ASAP, trying not to trigger any assertions.
3064 */
3065 VMMR0AssertionRemoveNotification(pVCpu);
3066 VMMRZCallRing3Disable(pVCpu);
3067 HM_DISABLE_PREEMPT(pVCpu);
3068
3069 /* Import the entire guest state. */
3070 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3071
3072 /* Restore host FPU state if necessary and resync on next R0 reentry. */
3073 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
3074
3075 /* Restore host debug registers if necessary and resync on next R0 reentry. */
3076 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
3077
3078 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
3079 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3080 VMMR0ThreadCtxHookDisable(pVCpu);
3081
3082 /* Leave HM context. This takes care of local init (term). */
3083 HMR0LeaveCpu(pVCpu);
3084
3085 HM_RESTORE_PREEMPT();
3086 return VINF_SUCCESS;
3087}
3088
3089
3090/**
3091 * Takes the necessary actions before going back to ring-3.
3092 *
3093 * An action requires us to go back to ring-3. This function does the necessary
3094 * steps before we can safely return to ring-3. This is not the same as a longjmp
3095 * to ring-3; this is voluntary.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure.
3099 * @param rcExit The reason for exiting to ring-3. Can be
3100 * VINF_VMM_UNKNOWN_RING3_CALL.
3101 */
3102static VBOXSTRICTRC hmR0SvmExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
3103{
3104 Assert(pVCpu);
3105 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3106
3107 /* Please, no longjumps here (any logging shouldn't flush and thereby jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
3108 VMMRZCallRing3Disable(pVCpu);
3109 Log4Func(("rcExit=%d LocalFF=%#RX64 GlobalFF=%#RX32\n", VBOXSTRICTRC_VAL(rcExit), (uint64_t)pVCpu->fLocalForcedActions,
3110 pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
3111
3112 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
3113 if (pVCpu->hm.s.Event.fPending)
3114 {
3115 hmR0SvmPendingEventToTrpmTrap(pVCpu);
3116 Assert(!pVCpu->hm.s.Event.fPending);
3117 }
3118
3119 /* Sync. the necessary state for going back to ring-3. */
3120 hmR0SvmLeaveSession(pVCpu);
3121 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3122
3123 /* Thread-context hooks are unregistered at this point!!! */
3124 /* Ring-3 callback notifications are unregistered at this point!!! */
3125
3126 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
3127 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
3128 | CPUM_CHANGED_LDTR
3129 | CPUM_CHANGED_GDTR
3130 | CPUM_CHANGED_IDTR
3131 | CPUM_CHANGED_TR
3132 | CPUM_CHANGED_HIDDEN_SEL_REGS);
3133 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
3134 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
3135 {
3136 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3137 }
3138
3139 /* Update the exit-to-ring-3 reason. */
3140 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
3141
3142 /* On our way back from ring-3, reload the guest-CPU state if it may change while in ring-3. */
3143 if ( rcExit != VINF_EM_RAW_INTERRUPT
3144 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3145 {
3146 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
3147 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3148 }
3149
3150 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
3151 VMMRZCallRing3Enable(pVCpu);
3152
3153 /*
3154 * If we're emulating an instruction, we shouldn't have any TRPM traps pending
3155 * and if we're injecting an event we should have a TRPM trap pending.
3156 */
3157 AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
3158 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3159 VERR_SVM_IPE_5);
3160 AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
3161 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3162 VERR_SVM_IPE_4);
3163
3164 return rcExit;
3165}
3166
3167
3168/**
3169 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
3170 * intercepts.
3171 *
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param pVmcb Pointer to the VM control block.
3174 *
3175 * @remarks No-long-jump zone!!!
3176 */
3177static void hmR0SvmUpdateTscOffsetting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3178{
3179 /*
3180 * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable
3181 * and, in the case of a nested-guest, if the nested VMCB also specifies it is not
3182 * intercepting RDTSC/RDTSCP.
3183 */
3184 bool fParavirtTsc;
3185 uint64_t uTscOffset;
3186 bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
3187
3188 bool fIntercept;
3189 if (fCanUseRealTsc)
3190 fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3191 else
3192 {
3193 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3194 fIntercept = true;
3195 }
3196
3197 if (!fIntercept)
3198 {
3199#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3200 /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
3201 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3202 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
3203#endif
3204
3205 /* Update the TSC offset in the VMCB and the relevant clean bits. */
3206 pVmcb->ctrl.u64TSCOffset = uTscOffset;
3207 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
3208 }
3209
3210 /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
3211 information before every VM-entry, hence we have nothing to do here at the moment. */
3212 if (fParavirtTsc)
3213 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
3214}
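/*
 * Conceptual sketch of the guest-visible TSC when RDTSC/RDTSCP is not intercepted (the
 * variable names below are illustrative only):
 *
 *     uGuestTsc = uHostTsc + pVmcb->ctrl.u64TSCOffset;
 *
 * For a nested-guest, CPUMApplyNestedGuestTscOffset() folds the nested VMCB's TSC offset
 * into the offset written above, so the nested-guest sees both offsets applied.
 */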
3215
3216
3217/**
3218 * Sets an event as a pending event to be injected into the guest.
3219 *
3220 * @param pVCpu The cross context virtual CPU structure.
3221 * @param pEvent Pointer to the SVM event.
3222 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3223 * page-fault.
3224 *
3225 * @remarks Statistics counter assumes this is a guest event being reflected to
3226 * the guest, i.e. 'StatInjectPendingReflect' is always incremented.
3227 */
3228DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPUCC pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
3229{
3230 Assert(!pVCpu->hm.s.Event.fPending);
3231 Assert(pEvent->n.u1Valid);
3232
3233 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
3234 pVCpu->hm.s.Event.fPending = true;
3235 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
3236
3237 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3238 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3239}
3240
3241
3242/**
3243 * Sets a divide error (\#DE) exception as pending-for-injection into the VM.
3244 *
3245 * @param pVCpu The cross context virtual CPU structure.
3246 */
3247DECLINLINE(void) hmR0SvmSetPendingXcptDE(PVMCPUCC pVCpu)
3248{
3249 SVMEVENT Event;
3250 Event.u = 0;
3251 Event.n.u1Valid = 1;
3252 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3253 Event.n.u8Vector = X86_XCPT_DE;
3254 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3255}
3256
3257
3258/**
3259 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3260 *
3261 * @param pVCpu The cross context virtual CPU structure.
3262 */
3263DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPUCC pVCpu)
3264{
3265 SVMEVENT Event;
3266 Event.u = 0;
3267 Event.n.u1Valid = 1;
3268 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3269 Event.n.u8Vector = X86_XCPT_UD;
3270 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3271}
3272
3273
3274/**
3275 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3276 *
3277 * @param pVCpu The cross context virtual CPU structure.
3278 */
3279DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPUCC pVCpu)
3280{
3281 SVMEVENT Event;
3282 Event.u = 0;
3283 Event.n.u1Valid = 1;
3284 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3285 Event.n.u8Vector = X86_XCPT_DB;
3286 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3287}
3288
3289
3290/**
3291 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
3292 *
3293 * @param pVCpu The cross context virtual CPU structure.
3294 * @param u32ErrCode The error-code for the page-fault.
3295 * @param uFaultAddress The page fault address (CR2).
3296 *
3297 * @remarks This updates the guest CR2 with @a uFaultAddress!
3298 */
3299DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3300{
3301 SVMEVENT Event;
3302 Event.u = 0;
3303 Event.n.u1Valid = 1;
3304 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3305 Event.n.u8Vector = X86_XCPT_PF;
3306 Event.n.u1ErrorCodeValid = 1;
3307 Event.n.u32ErrorCode = u32ErrCode;
3308
3309 /* Update CR2 of the guest. */
3310 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
3311 if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
3312 {
3313 pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
3314 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
3315 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
3316 }
3317
3318 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3319}
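/*
 * Hypothetical usage sketch (not lifted from an actual exit handler): a #PF intercept
 * handler would typically feed this helper from the VMCB exit information, where
 * EXITINFO1 holds the page-fault error code and EXITINFO2 the faulting address:
 *
 *     uint32_t const uErrCode = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
 *     RTGCUINTPTR const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
 *     hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
 */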
3320
3321
3322/**
3323 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
3324 *
3325 * @param pVCpu The cross context virtual CPU structure.
3326 */
3327DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPUCC pVCpu)
3328{
3329 SVMEVENT Event;
3330 Event.u = 0;
3331 Event.n.u1Valid = 1;
3332 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3333 Event.n.u8Vector = X86_XCPT_MF;
3334 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3335}
3336
3337
3338/**
3339 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
3340 *
3341 * @param pVCpu The cross context virtual CPU structure.
3342 */
3343DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPUCC pVCpu)
3344{
3345 SVMEVENT Event;
3346 Event.u = 0;
3347 Event.n.u1Valid = 1;
3348 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3349 Event.n.u8Vector = X86_XCPT_DF;
3350 Event.n.u1ErrorCodeValid = 1;
3351 Event.n.u32ErrorCode = 0;
3352 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3353}
3354
3355
3356/**
3357 * Injects an event into the guest upon VMRUN by updating the relevant field
3358 * in the VMCB.
3359 *
3360 * @param pVCpu The cross context virtual CPU structure.
3361 * @param pVmcb Pointer to the guest VM control block.
3362 * @param pEvent Pointer to the event.
3363 *
3364 * @remarks No-long-jump zone!!!
3365 * @remarks Requires CR0!
3366 */
3367DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
3368{
3369 Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
3370 pVmcb->ctrl.EventInject.u = pEvent->u;
3371 if ( pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_EXCEPTION
3372 || pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_NMI)
3373 {
3374 Assert(pEvent->n.u8Vector <= X86_XCPT_LAST);
3375 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[pEvent->n.u8Vector]);
3376 }
3377 else
3378 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
3379 RT_NOREF(pVCpu);
3380
3381 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3382 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3383}
3384
3385
3386
3387/**
3388 * Converts any TRPM trap into a pending HM event. This is typically used when
3389 * entering from ring-3 (not longjmp returns).
3390 *
3391 * @param pVCpu The cross context virtual CPU structure.
3392 */
3393static void hmR0SvmTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3394{
3395 Assert(TRPMHasTrap(pVCpu));
3396 Assert(!pVCpu->hm.s.Event.fPending);
3397
3398 uint8_t uVector;
3399 TRPMEVENT enmTrpmEvent;
3400 uint32_t uErrCode;
3401 RTGCUINTPTR GCPtrFaultAddress;
3402 uint8_t cbInstr;
3403
3404 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, NULL /* pfIcebp */);
3405 AssertRC(rc);
3406
3407 SVMEVENT Event;
3408 Event.u = 0;
3409 Event.n.u1Valid = 1;
3410 Event.n.u8Vector = uVector;
3411
3412 /* Refer to AMD spec. 15.20 "Event Injection" for the format. */
3413 if (enmTrpmEvent == TRPM_TRAP)
3414 {
3415 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3416 switch (uVector)
3417 {
3418 case X86_XCPT_BP:
3419 case X86_XCPT_OF:
3420 AssertMsgFailed(("Invalid TRPM vector %d for event type %d\n", uVector, enmTrpmEvent));
3421 RT_FALL_THRU();
3422
3423 case X86_XCPT_PF:
3424 case X86_XCPT_DF:
3425 case X86_XCPT_TS:
3426 case X86_XCPT_NP:
3427 case X86_XCPT_SS:
3428 case X86_XCPT_GP:
3429 case X86_XCPT_AC:
3430 {
3431 Event.n.u1ErrorCodeValid = 1;
3432 Event.n.u32ErrorCode = uErrCode;
3433 break;
3434 }
3435 }
3436 }
3437 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
3438 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3439 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
3440 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
3441 else if (enmTrpmEvent == TRPM_NMI)
3442 Event.n.u3Type = SVM_EVENT_NMI;
3443 else
3444 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
3445
3446 rc = TRPMResetTrap(pVCpu);
3447 AssertRC(rc);
3448
3449 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
3450 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
3451
3452 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
3453}
3454
3455
3456/**
3457 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
3458 * AMD-V to execute any instruction.
3459 *
3460 * @param pVCpu The cross context virtual CPU structure.
3461 */
3462static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu)
3463{
3464 Assert(pVCpu->hm.s.Event.fPending);
3465 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
3466
3467 SVMEVENT Event;
3468 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3469
3470 uint8_t uVector = Event.n.u8Vector;
3471 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event, uVector);
3472
3473 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, Event.n.u3Type));
3474
3475 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
3476 AssertRC(rc);
3477
3478 if (Event.n.u1ErrorCodeValid)
3479 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
3480
3481 if ( enmTrapType == TRPM_TRAP
3482 && uVector == X86_XCPT_PF)
3483 {
3484 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
3485 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
3486 }
3487 else if (enmTrapType == TRPM_SOFTWARE_INT)
3488 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
3489 pVCpu->hm.s.Event.fPending = false;
3490}
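/*
 * Note: this is the counterpart of hmR0SvmTrpmTrapToPendingEvent() above. Pending HM
 * events are converted to TRPM traps when exiting to ring-3 (see hmR0SvmExitToRing3())
 * and converted back into HM events by hmR0SvmPreRunGuest() before the next VMRUN.
 */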
3491
3492
3493/**
3494 * Sets the virtual interrupt intercept control in the VMCB.
3495 *
3496 * @param pVCpu The cross context virtual CPU structure.
3497 * @param pVmcb Pointer to the VM control block.
3498 */
3499static void hmR0SvmSetIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3500{
3501 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3502
3503 /*
3504 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
3505 * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC
3506 * (updating ISR at the right time) and inject the interrupt.
3507 *
3508 * When AVIC is supported, we could make use of asynchronous delivery without a
3509 * #VMEXIT and we would be passing the AVIC page to SVM.
3510 *
3511 * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt
3512 * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
3513 */
3514 Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
3515 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
3516 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3517 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3518 Log4(("Set VINTR intercept\n"));
3519}
3520
3521
3522/**
3523 * Clears the virtual interrupt intercept control in the VMCB as
3524 * we have figured the guest is unable to process any interrupts
3525 * at this point in time.
3526 *
3527 * @param pVCpu The cross context virtual CPU structure.
3528 * @param pVmcb Pointer to the VM control block.
3529 */
3530static void hmR0SvmClearIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3531{
3532 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3533
3534 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
3535 if ( pVmcbCtrl->IntCtrl.n.u1VIrqPending
3536 || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
3537 {
3538 pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
3539 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3540 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3541 Log4(("Cleared VINTR intercept\n"));
3542 }
3543}
3544
3545
3546/**
3547 * Evaluates the event to be delivered to the guest and sets it as the pending
3548 * event.
3549 *
3550 * @returns Strict VBox status code.
3551 * @param pVCpu The cross context virtual CPU structure.
3552 * @param pSvmTransient Pointer to the SVM transient structure.
3553 */
3554static VBOXSTRICTRC hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
3555{
3556 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3557 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
3558 | CPUMCTX_EXTRN_RFLAGS
3559 | CPUMCTX_EXTRN_INHIBIT_INT
3560 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
3561
3562 Assert(!pVCpu->hm.s.Event.fPending);
3563 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3564 Assert(pVmcb);
3565
3566 bool const fGif = CPUMGetGuestGif(pCtx);
3567 bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(pCtx);
3568 bool const fBlockNmi = CPUMAreInterruptsInhibitedByNmi(pCtx);
3569
3570 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
3571 fGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
3572 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
3573
3574 /** @todo SMI. SMIs take priority over NMIs. */
3575
3576 /*
3577 * Check if the guest or nested-guest can receive NMIs.
3578 * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
3579 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
3580 */
3581 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
3582 && !fBlockNmi)
3583 {
3584 if ( fGif
3585 && !fIntShadow)
3586 {
3587#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3588 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
3589 {
3590 Log4(("Intercepting NMI -> #VMEXIT\n"));
3591 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3592 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
3593 }
3594#endif
3595 Log4(("Setting NMI pending for injection\n"));
3596 SVMEVENT Event;
3597 Event.u = 0;
3598 Event.n.u1Valid = 1;
3599 Event.n.u8Vector = X86_XCPT_NMI;
3600 Event.n.u3Type = SVM_EVENT_NMI;
3601 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3602 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3603 }
3604 else if (!fGif)
3605 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3606 else if (!pSvmTransient->fIsNestedGuest)
3607 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3608 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3609 }
3610 /*
3611 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
3612 * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
3613 * it from the APIC device.
3614 *
3615 * For nested-guests, physical interrupts always take priority over virtual interrupts.
3616 * We don't need to inject nested-guest virtual interrupts here, we can let the hardware
3617 * do that work when we execute nested-guest code esp. since all the required information
3618 * is in the VMCB, unlike physical interrupts where we need to fetch the interrupt from
3619 * the virtual interrupt controller.
3620 *
3621 * See AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
3622 */
3623 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3624 && !pVCpu->hm.s.fSingleInstruction)
3625 {
3626 bool const fBlockInt = !pSvmTransient->fIsNestedGuest ? !(pCtx->eflags.u & X86_EFL_IF)
3627 : CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx);
3628 if ( fGif
3629 && !fBlockInt
3630 && !fIntShadow)
3631 {
3632#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3633 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
3634 {
3635 Log4(("Intercepting INTR -> #VMEXIT\n"));
3636 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3637 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
3638 }
3639#endif
3640 uint8_t u8Interrupt;
3641 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3642 if (RT_SUCCESS(rc))
3643 {
3644 Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
3645 SVMEVENT Event;
3646 Event.u = 0;
3647 Event.n.u1Valid = 1;
3648 Event.n.u8Vector = u8Interrupt;
3649 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3650 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3651 }
3652 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3653 {
3654 /*
3655 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3656 * updated eventually when the TPR is written by the guest.
3657 */
3658 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3659 }
3660 else
3661 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3662 }
3663 else if (!fGif)
3664 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3665 else if (!pSvmTransient->fIsNestedGuest)
3666 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3667 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3668 }
3669
3670 return VINF_SUCCESS;
3671}
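/*
 * Summary of the event priority implemented above (SMIs are not handled yet, see the
 * @todo): deliverable NMIs are evaluated first and set as the pending event; otherwise a
 * maskable PIC/APIC interrupt is fetched via PDMGetInterrupt(). When delivery is blocked
 * (GIF clear, interrupt shadow or EFLAGS.IF clear), the STGI or VINTR intercepts are
 * armed instead so we get notified once delivery becomes possible again.
 */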
3672
3673
3674/**
3675 * Injects any pending events into the guest (or nested-guest).
3676 *
3677 * @param pVCpu The cross context virtual CPU structure.
3678 * @param pVmcb Pointer to the VM control block.
3679 *
3680 * @remarks Must only be called when we are guaranteed to enter
3681 * hardware-assisted SVM execution and not return to ring-3
3682 * prematurely.
3683 */
3684static void hmR0SvmInjectPendingEvent(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3685{
3686 Assert(!TRPMHasTrap(pVCpu));
3687 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3688
3689 bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx);
3690#ifdef VBOX_STRICT
3691 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3692 bool const fGif = CPUMGetGuestGif(pCtx);
3693 bool fAllowInt = fGif;
3694 if (fGif)
3695 {
3696 /*
3697 * For nested-guests we have no way to determine if we're injecting a physical or
3698 * virtual interrupt at this point. Hence the partial verification below.
3699 */
3700 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3701 fAllowInt = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx) || CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
3702 else
3703 fAllowInt = RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
3704 }
3705#endif
3706
3707 if (pVCpu->hm.s.Event.fPending)
3708 {
3709 SVMEVENT Event;
3710 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3711 Assert(Event.n.u1Valid);
3712
3713 /*
3714 * Validate event injection pre-conditions.
3715 */
3716 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3717 {
3718 Assert(fAllowInt);
3719 Assert(!fIntShadow);
3720 }
3721 else if (Event.n.u3Type == SVM_EVENT_NMI)
3722 {
3723 Assert(fGif);
3724 Assert(!fIntShadow);
3725 }
3726
3727 /*
3728 * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We
3729 * do this only when we are surely going to inject the NMI as otherwise if we return
3730 * to ring-3 prematurely we could leave NMIs blocked indefinitely upon re-entry into
3731 * SVM R0.
3732 *
3733 * With VT-x, this is handled by the Guest interruptibility information VMCS field
3734 * which will set the VMCS field after actually delivering the NMI which we read on
3735 * VM-exit to determine the state.
3736 */
3737 if ( Event.n.u3Type == SVM_EVENT_NMI
3738 && Event.n.u8Vector == X86_XCPT_NMI)
3739 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3740
3741 /*
3742 * Inject it (update VMCB for injection by the hardware).
3743 */
3744 Log4(("Injecting pending HM event\n"));
3745 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
3746 pVCpu->hm.s.Event.fPending = false;
3747
3748 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3749 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
3750 else
3751 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
3752 }
3753 else
3754 Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
3755
3756 /*
3757 * We could have injected an NMI through IEM and continued guest execution using
3758 * hardware-assisted SVM, in which case we would not have any events pending (above)
3759 * but would still need to intercept IRET in order to eventually clear NMI inhibition.
3760 */
3761 if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
3762 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
3763
3764 /*
3765 * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
3766 *
3767 * For nested-guests: We need to update it too for the scenario where IEM executes
3768 * the nested-guest but execution later continues here with an interrupt shadow active.
3769 */
3770 pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
3771}
3772
3773
3774/**
3775 * Reports world-switch error and dumps some useful debug info.
3776 *
3777 * @param pVCpu The cross context virtual CPU structure.
3778 * @param rcVMRun The return code from VMRUN (or
3779 * VERR_SVM_INVALID_GUEST_STATE for invalid
3780 * guest-state).
3781 */
3782static void hmR0SvmReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun)
3783{
3784 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3785 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
3786 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3787
3788 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
3789 {
3790#ifdef VBOX_STRICT
3791 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
3792 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3793 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
3794 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
3795 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
3796 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
3797 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
3798 Log4(("ctrl.u32InterceptXcpt %#x\n", pVmcb->ctrl.u32InterceptXcpt));
3799 Log4(("ctrl.u64InterceptCtrl %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
3800 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
3801 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
3802 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
3803
3804 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
3805 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
3806 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
3807
3808 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
3809 Log4(("ctrl.IntCtrl.u1VIrqPending %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
3810 Log4(("ctrl.IntCtrl.u1VGif %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGif));
3811 Log4(("ctrl.IntCtrl.u6Reserved0 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
3812 Log4(("ctrl.IntCtrl.u4VIntrPrio %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
3813 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
3814 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
3815 Log4(("ctrl.IntCtrl.u1VIntrMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
3816 Log4(("ctrl.IntCtrl.u1VGifEnable %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
3817 Log4(("ctrl.IntCtrl.u5Reserved1 %#x\n", pVmcb->ctrl.IntCtrl.n.u5Reserved));
3818 Log4(("ctrl.IntCtrl.u8VIntrVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
3819 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
3820
3821 Log4(("ctrl.IntShadow.u1IntShadow %#x\n", pVmcb->ctrl.IntShadow.n.u1IntShadow));
3822 Log4(("ctrl.IntShadow.u1GuestIntMask %#x\n", pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
3823 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
3824 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
3825 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
3826 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
3827 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
3828 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
3829 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
3830 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
3831 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3832 Log4(("ctrl.NestedPagingCtrl.u1NestedPaging %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging));
3833 Log4(("ctrl.NestedPagingCtrl.u1Sev %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1Sev));
3834 Log4(("ctrl.NestedPagingCtrl.u1SevEs %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1SevEs));
3835 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
3836 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
3837 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
3838 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
3839 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
3840 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
3841
3842 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
3843
3844 Log4(("ctrl.LbrVirt.u1LbrVirt %#x\n", pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
3845 Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload %#x\n", pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));
3846
3847 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
3848 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
3849 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
3850 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
3851 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
3852 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
3853 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
3854 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
3855 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
3856 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
3857 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
3858 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
3859 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
3860 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
3861 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
3862 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
3863 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
3864 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
3865 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
3866 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
3867
3868 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
3869 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
3870
3871 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
3872 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
3873 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
3874 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
3875
3876 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
3877 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
3878
3879 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
3880 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
3881 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
3882 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
3883
3884 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
3885 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
3886 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
3887 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
3888 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
3889 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
3890 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
3891
3892 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
3893 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
3894 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
3895 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
3896
3897 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
3898 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
3899 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
3900
3901 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
3902 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
3903 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
3904 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
3905 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
3906 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
3907 Log4(("guest.u64PAT %#RX64\n", pVmcb->guest.u64PAT));
3908 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
3909 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
3910 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
3911 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
3912 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
3913
3914 NOREF(pVmcb);
3915#endif /* VBOX_STRICT */
3916 }
3917 else
3918 Log4Func(("rcVMRun=%d\n", rcVMRun));
3919}
3920
3921
3922/**
3923 * Checks per-VM and per-VCPU force-flag actions that require us to go back to
3924 * ring-3 for one reason or another.
3925 *
3926 * @returns Strict VBox status code (information status code included).
3927 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3928 * ring-3.
3929 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3930 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3931 * interrupts)
3932 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3933 * all EMTs to be in ring-3.
3934 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3935 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
3936 * to the EM loop.
3937 *
3938 * @param pVCpu The cross context virtual CPU structure.
3939 */
3940static VBOXSTRICTRC hmR0SvmCheckForceFlags(PVMCPUCC pVCpu)
3941{
3942 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3943
3944 /* Could happen as a result of longjump. */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
3946 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3947
3948 /* Update pending interrupts into the APIC's IRR. */
3949 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3950 APICUpdatePendingInterrupts(pVCpu);
3951
3952 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3953 if ( VM_FF_IS_ANY_SET(pVM, !pVCpu->hm.s.fSingleInstruction
3954 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3955 || VMCPU_FF_IS_ANY_SET(pVCpu, !pVCpu->hm.s.fSingleInstruction
3956 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3957 {
3958 /* Pending PGM CR3 sync. */
3959 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3960 {
3961 int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
3962 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3963 if (rc != VINF_SUCCESS)
3964 {
3965 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
3966 return rc;
3967 }
3968 }
3969
3970 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3971 /* -XXX- what was that about single stepping? */
3972 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3973 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3974 {
3975 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
3976 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3977 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3978 return rc;
3979 }
3980
3981 /* Pending VM request packets, such as hardware interrupts. */
3982 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3983 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3984 {
3985 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
3986 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3987 return VINF_EM_PENDING_REQUEST;
3988 }
3989
3990 /* Pending PGM pool flushes. */
3991 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3992 {
3993 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
3994 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3995 return VINF_PGM_POOL_FLUSH_PENDING;
3996 }
3997
3998 /* Pending DMA requests. */
3999 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4000 {
4001 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
4002 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4003 return VINF_EM_RAW_TO_R3;
4004 }
4005 }
4006
4007 return VINF_SUCCESS;
4008}
4009
4010
4011/**
4012 * Does the preparations before executing guest code in AMD-V.
4013 *
4014 * This may cause longjmps to ring-3 and may even result in rescheduling to the
4015 * recompiler. We must be cautious about what we do here regarding committing
4016 * guest-state information into the VMCB, assuming we will definitely execute the
4017 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
4018 * clearing the common-state (TRPM/forceflags), we must undo those changes so
4019 * that the recompiler can (and should) use them when it resumes guest
4020 * execution. Otherwise such operations must be done when we can no longer
4021 * exit to ring-3.
4022 *
4023 * @returns Strict VBox status code (informational status codes included).
4024 * @retval VINF_SUCCESS if we can proceed with running the guest.
4025 * @retval VINF_* scheduling changes, we have to go back to ring-3.
4026 *
4027 * @param pVCpu The cross context virtual CPU structure.
4028 * @param pSvmTransient Pointer to the SVM transient structure.
4029 */
4030static VBOXSTRICTRC hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4031{
4032 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4033
4034#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
4035 if (pSvmTransient->fIsNestedGuest)
4036 {
4037 Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
4038 return VINF_EM_RESCHEDULE_REM;
4039 }
4040#endif
4041
4042 /* Check force flag actions that might require us to go back to ring-3. */
4043 VBOXSTRICTRC rc = hmR0SvmCheckForceFlags(pVCpu);
4044 if (rc != VINF_SUCCESS)
4045 return rc;
4046
4047 if (TRPMHasTrap(pVCpu))
4048 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
4049 else if (!pVCpu->hm.s.Event.fPending)
4050 {
4051 rc = hmR0SvmEvaluatePendingEvent(pVCpu, pSvmTransient);
4052 if ( rc != VINF_SUCCESS
4053 || pSvmTransient->fIsNestedGuest != CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4054 {
4055 /* If a nested-guest VM-exit occurred, bail. */
4056 if (pSvmTransient->fIsNestedGuest)
4057 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4058 return rc;
4059 }
4060 }
4061
4062 /*
4063 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
4064 * Just do it in software, see @bugref{8411}.
4065 * NB: If we could continue a task switch exit we wouldn't need to do this.
4066 */
4067 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4068 if (RT_UNLIKELY( !g_fHmSvmFeatures
4069 && pVCpu->hm.s.Event.fPending
4070 && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
4071 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4072
4073#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4074 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4075 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
4076#endif
4077
4078#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4079 /*
4080 * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
4081 */
4082 if (pSvmTransient->fIsNestedGuest)
4083 hmR0SvmSetupVmcbNested(pVCpu);
4084#endif
4085
4086 /*
4087 * Export the guest state bits that are not shared with the host in any way as we can
4088 * longjmp or get preempted in the midst of exporting some of the state.
4089 */
4090 rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
4091 AssertRCReturn(rc, rc);
4092 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
4093
4094 /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
4095 Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
4096
4097 /*
4098 * If we're not intercepting TPR changes in the guest, save the guest TPR before the
4099 * world-switch so we can update it on the way back if the guest changed the TPR.
4100 */
4101 if (pVCpu->hmr0.s.svm.fSyncVTpr)
4102 {
4103 Assert(!pSvmTransient->fIsNestedGuest);
4104 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4105 if (pVM->hm.s.fTprPatchingActive)
4106 pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
4107 else
4108 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
4109 }
4110
4111 /*
4112 * No longjmps to ring-3 from this point on!!!
4113 *
4114 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4115 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4116 */
4117 VMMRZCallRing3Disable(pVCpu);
4118
4119 /*
4120 * We disable interrupts so that we don't miss any interrupts that would flag preemption
4121 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
4122 * preemption disabled for a while. Since this is purely to aid the
4123 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily re-enable and
4124 * disable interrupts on NT.
4125 *
4126 * We need to check for force-flags that could've possibly been altered since we last
4127 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
4128 * see @bugref{6398}).
4129 *
4130 * We also check a couple of other force-flags as a last opportunity to get the EMT back
4131 * to ring-3 before executing guest code.
4132 */
4133 pSvmTransient->fEFlags = ASMIntDisableFlags();
4134 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4135 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4136 {
4137 ASMSetFlags(pSvmTransient->fEFlags);
4138 VMMRZCallRing3Enable(pVCpu);
4139 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4140 return VINF_EM_RAW_TO_R3;
4141 }
4142 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4143 {
4144 ASMSetFlags(pSvmTransient->fEFlags);
4145 VMMRZCallRing3Enable(pVCpu);
4146 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
4147 return VINF_EM_RAW_INTERRUPT;
4148 }
4149
4150 return VINF_SUCCESS;
4151}
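/*
 * Note: when hmR0SvmPreRunGuest() returns VINF_SUCCESS, host interrupts are disabled
 * (the original flags are stashed in pSvmTransient->fEFlags) and ring-3 calls are
 * disabled; both are restored by hmR0SvmPostRunGuest() after the world switch.
 */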
4152
4153
4154/**
4155 * Prepares to run guest (or nested-guest) code in AMD-V and we've committed to
4156 * doing so.
4157 *
4158 * This means there is no backing out to ring-3 or anywhere else at this point.
4159 *
4160 * @param pVCpu The cross context virtual CPU structure.
4161 * @param pSvmTransient Pointer to the SVM transient structure.
4162 *
4163 * @remarks Called with preemption disabled.
4164 * @remarks No-long-jump zone!!!
4165 */
4166static void hmR0SvmPreRunGuestCommitted(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4167{
4168 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4169 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4170
4171 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4172 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4173
4174 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4175 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4176
4177 hmR0SvmInjectPendingEvent(pVCpu, pVmcb);
4178
4179 if (!CPUMIsGuestFPUStateActive(pVCpu))
4180 {
4181 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4182 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4183 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4184 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
4185 }
4186
4187 /* Load the state shared between host and guest (FPU, debug). */
4188 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)
4189 hmR0SvmExportSharedState(pVCpu, pVmcb);
4190
4191 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */
4192 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
4193
4194 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
4195 RTCPUID const idHostCpu = pHostCpu->idCpu;
4196 bool const fMigratedHostCpu = idHostCpu != pVCpu->hmr0.s.idLastCpu;
4197
4198 /* Setup TSC offsetting. */
4199 if ( pSvmTransient->fUpdateTscOffsetting
4200 || fMigratedHostCpu)
4201 {
4202 hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
4203 pSvmTransient->fUpdateTscOffsetting = false;
4204 }
4205
4206 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
4207 if (!(pVmcb->ctrl.u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
4208 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4209 else
4210 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4211
4212 /* If we've migrated CPUs, mark the VMCB Clean bits as dirty. */
4213 if (fMigratedHostCpu)
4214 pVmcb->ctrl.u32VmcbCleanBits = 0;
4215
4216 /* Store status of the shared guest-host state at the time of VMRUN. */
4217 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4218 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4219
4220#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4221 uint8_t *pbMsrBitmap;
4222 if (!pSvmTransient->fIsNestedGuest)
4223 pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
4224 else
4225 {
4226 /** @todo We could perhaps optimize this by monitoring if the guest modifies its
4227 * MSRPM and only performing this if it changed; also use EVEX.POR when it
4228 * does. */
4229 hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);
4230
4231 /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
4232 pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
4233 pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
4234 }
4235#else
4236 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
4237#endif
4238
4239 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4240 /* Flush the appropriate tagged-TLB entries. */
4241 hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
4242 Assert(pVCpu->hmr0.s.idLastCpu == idHostCpu);
4243
4244 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4245
4246 TMNotifyStartOfExecution(pVM, pVCpu); /* Finally, notify TM to resume its clocks as we're about
4247 to start executing. */
4248
4249 /*
4250 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs
4251 * (that don't cause exits) read the guest MSR, see @bugref{3324}.
4252 *
4253 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4254 */
4255 if ( g_CpumHostFeatures.s.fRdTscP
4256 && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4257 {
4258 uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
4259 pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4260 if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
4261 ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
4262 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4263 pSvmTransient->fRestoreTscAuxMsr = true;
4264 }
4265 else
4266 {
4267 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4268 pSvmTransient->fRestoreTscAuxMsr = false;
4269 }
4270 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
4271
4272 /*
4273 * If VMCB Clean bits aren't supported by the CPU or aren't exposed to the guest in the
4274 * nested-virtualization case, mark all state-bits as dirty, indicating to the CPU to re-load
4275 * from the VMCB.
4276 */
4277 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pSvmTransient->fIsNestedGuest);
4278 if (!fSupportsVmcbCleanBits)
4279 pVmcb->ctrl.u32VmcbCleanBits = 0;
4280}
4281
4282
4283/**
4284 * Wrapper for running the guest (or nested-guest) code in AMD-V.
4285 *
4286 * @returns VBox strict status code.
4287 * @param pVCpu The cross context virtual CPU structure.
4288 * @param HCPhysVmcb The host physical address of the VMCB.
4289 *
4290 * @remarks No-long-jump zone!!!
4291 */
4292DECLINLINE(int) hmR0SvmRunGuest(PVMCPUCC pVCpu, RTHCPHYS HCPhysVmcb)
4293{
4294 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
4295 pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
4296 return pVCpu->hmr0.s.svm.pfnVMRun(pVCpu->CTX_SUFF(pVM), pVCpu, HCPhysVmcb);
4297}
4298
4299
4300/**
4301 * Performs some essential restoration of state after running guest (or
4302 * nested-guest) code in AMD-V.
4303 *
4304 * @param pVCpu The cross context virtual CPU structure.
4305 * @param pSvmTransient Pointer to the SVM transient structure.
4306 * @param rcVMRun Return code of VMRUN.
4307 *
4308 * @remarks Called with interrupts disabled.
4309 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4310 * unconditionally when it is safe to do so.
4311 */
4312static void hmR0SvmPostRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, VBOXSTRICTRC rcVMRun)
4313{
4314 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4315
4316 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4317 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4318
4319 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4320 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
4321
4322 /* TSC read must be done early for maximum accuracy. */
4323 if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4324 {
4325 if (!pSvmTransient->fIsNestedGuest)
4326 TMCpuTickSetLastSeen(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4327#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4328 else
4329 {
4330 /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
4331 uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4332 TMCpuTickSetLastSeen(pVCpu, uGstTsc);
4333 }
4334#endif
4335 }
4336
4337 if (pSvmTransient->fRestoreTscAuxMsr)
4338 {
4339 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4340 CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4341 if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
4342 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
4343 }
4344
4345 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
4346 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4347 TMNotifyEndOfExecution(pVM, pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
4348 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4349
4350 Assert(!(ASMGetFlags() & X86_EFL_IF));
4351 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4352 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4353
4354 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4355 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4356 {
4357 Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", VBOXSTRICTRC_VAL(rcVMRun)));
4358 return;
4359 }
4360
4361 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
4362 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4363 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4364 pVmcbCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
4365
4366#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4367 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4368 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4369#else
4370 /*
4371 * Always import the following:
4372 *
4373 * - RIP for exit optimizations and evaluating event injection on re-entry.
4374 * - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug
4375 * state on preemption.
4376 * - Interrupt shadow, GIF for evaluating event injection on VM re-entry.
4377 * - CS for exit optimizations.
4378 * - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the
4379 * assembly switcher code.
4380 * - Shared state (only DR7 currently) for exporting shared debug state on preemption.
4381 */
4382 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
4383 | CPUMCTX_EXTRN_RFLAGS
4384 | CPUMCTX_EXTRN_RAX
4385 | CPUMCTX_EXTRN_RSP
4386 | CPUMCTX_EXTRN_CS
4387 | CPUMCTX_EXTRN_HWVIRT
4388 | CPUMCTX_EXTRN_INHIBIT_INT
4389 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
4390 | HMSVM_CPUMCTX_SHARED_STATE);
4391#endif
4392
4393 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
4394 && pVCpu->hmr0.s.svm.fSyncVTpr)
4395 {
4396 Assert(!pSvmTransient->fIsNestedGuest);
4397 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
4398 if ( pVM->hm.s.fTprPatchingActive
4399 && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
4400 {
4401 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
4402 AssertRC(rc);
4403 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4404 }
4405 /* Sync TPR when we aren't intercepting CR8 writes. */
4406 else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
4407 {
4408 int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
4409 AssertRC(rc);
4410 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4411 }
4412 }
4413
4414#ifdef DEBUG_ramshankar
4415 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4416 {
4417 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4418 hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
4419 0 /* uVerbose */);
4420 }
4421#endif
4422
4423 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
4424 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
4425 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, pVCpu->hmr0.s.uTscExit);
4426}
4427
4428
4429/**
4430 * Runs the guest code using AMD-V.
4431 *
4432 * @returns Strict VBox status code.
4433 * @param pVCpu The cross context virtual CPU structure.
4434 * @param pcLoops Pointer to the number of executed loops.
4435 */
4436static VBOXSTRICTRC hmR0SvmRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
4437{
4438 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
4439 Assert(pcLoops);
4440 Assert(*pcLoops <= cMaxResumeLoops);
4441
4442 SVMTRANSIENT SvmTransient;
4443 RT_ZERO(SvmTransient);
4444 SvmTransient.fUpdateTscOffsetting = true;
4445 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4446
4447 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
4448 for (;;)
4449 {
4450 Assert(!HMR0SuspendPending());
4451 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4452
4453        /* Preparatory work for running guest code, this may force us to return to
4454           ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4455 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4456 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4457 if (rc != VINF_SUCCESS)
4458 break;
4459
4460 /*
4461 * No longjmps to ring-3 from this point on!!!
4462 *
4463 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4464 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4465 */
4466 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4467 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
4468
4469 /* Restore any residual host-state and save any bits shared between host and guest
4470 into the guest-CPU state. Re-enables interrupts! */
4471 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4472
4473 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4474 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4475 {
4476 if (rc == VINF_SUCCESS)
4477 rc = VERR_SVM_INVALID_GUEST_STATE;
4478 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
4479 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
4480 break;
4481 }
4482
4483 /* Handle the #VMEXIT. */
4484 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4485 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4486 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
4487 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
4488 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4489 if (rc != VINF_SUCCESS)
4490 break;
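        /* Honour the resume-loop cap below: returning VINF_EM_RAW_INTERRUPT drops us back to
           ring-3 so we don't keep looping in ring-0 indefinitely; execution simply resumes
           from there. */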
4491 if (++(*pcLoops) >= cMaxResumeLoops)
4492 {
4493 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4494 rc = VINF_EM_RAW_INTERRUPT;
4495 break;
4496 }
4497 }
4498
4499 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4500 return rc;
4501}
4502
4503
4504#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4505/**
4506 * Runs the nested-guest code using AMD-V.
4507 *
4508 * @returns Strict VBox status code.
4509 * @param pVCpu The cross context virtual CPU structure.
4510 * @param pcLoops Pointer to the number of executed loops. If we're switching
4511 * from the guest-code execution loop to this nested-guest
4512 * execution loop pass the remainder value, else pass 0.
4513 */
4514static VBOXSTRICTRC hmR0SvmRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
4515{
4516 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4517 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4518 Assert(pcLoops);
4519 Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops);
4520 /** @todo r=bird: Sharing this with ring-3 isn't safe in the long run, I fear... */
4521 RTHCPHYS const HCPhysVmcb = GVMMR0ConvertGVMPtr2HCPhys(pVCpu->pGVM, &pCtx->hwvirt.svm.Vmcb);
4522
4523 SVMTRANSIENT SvmTransient;
4524 RT_ZERO(SvmTransient);
4525 SvmTransient.fUpdateTscOffsetting = true;
4526 SvmTransient.pVmcb = &pCtx->hwvirt.svm.Vmcb;
4527 SvmTransient.fIsNestedGuest = true;
4528
4529 /* Setup pointer so PGM/IEM can query #VMEXIT auxiliary info. on demand in ring-0. */
4530 pVCpu->hmr0.s.svm.pSvmTransient = &SvmTransient;
4531
4532 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_4;
4533 for (;;)
4534 {
4535 Assert(!HMR0SuspendPending());
4536 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4537
4538 /* Preparatory work for running nested-guest code, this may force us to return to
4539 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4540 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4541 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4542 if ( rc != VINF_SUCCESS
4543 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4544 break;
4545
4546 /*
4547 * No longjmps to ring-3 from this point on!!!
4548 *
4549 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4550 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4551 */
4552 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4553
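        /* Note that we run the nested-guest using its own VMCB, i.e. the one living in the
           guest-CPU context and mapped to HCPhysVmcb above, not the outer guest's VMCB. */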
4554 rc = hmR0SvmRunGuest(pVCpu, HCPhysVmcb);
4555
4556 /* Restore any residual host-state and save any bits shared between host and guest
4557 into the guest-CPU state. Re-enables interrupts! */
4558 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4559
4560 if (RT_LIKELY( rc == VINF_SUCCESS
4561 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
4562 { /* extremely likely */ }
4563 else
4564 {
4565 /* VMRUN failed, shouldn't really happen, Guru. */
4566 if (rc != VINF_SUCCESS)
4567 break;
4568
4569 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
4570 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4571 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
4572 rc = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0);
4573 break;
4574 }
4575
4576 /* Handle the #VMEXIT. */
4577 HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4578 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4579 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, &pCtx->hwvirt.svm.Vmcb);
4580 rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient);
4581 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4582 if (rc == VINF_SUCCESS)
4583 {
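            /* If the #VMEXIT handler caused a world switch back to the outer guest, leave this
               loop with VINF_SVM_VMEXIT so the caller re-enters the regular execution loop. */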
4584 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4585 {
4586 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4587 rc = VINF_SVM_VMEXIT;
4588 }
4589 else
4590 {
4591 if (++(*pcLoops) <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops)
4592 continue;
4593 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4594 rc = VINF_EM_RAW_INTERRUPT;
4595 }
4596 }
4597 else
4598 Assert(rc != VINF_SVM_VMEXIT);
4599 break;
4600 /** @todo NSTSVM: handle single-stepping. */
4601 }
4602
4603 /* Ensure #VMEXIT auxiliary info. is no longer available. */
4604 pVCpu->hmr0.s.svm.pSvmTransient = NULL;
4605
4606 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4607 return rc;
4608}
4609#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
4610
4611
4612/**
4613 * Checks if any expensive dtrace probes are enabled and we should go to the
4614 * debug loop.
4615 *
4616 * @returns true if we should use debug loop, false if not.
4617 */
4618static bool hmR0SvmAnyExpensiveProbesEnabled(void)
4619{
4620 /* It's probably faster to OR the raw 32-bit counter variables together.
4621 Since the variables are in an array and the probes are next to one
4622 another (more or less), we have good locality. So, better read
4623       eight or nine cache lines every time and only have one conditional, than
4624 128+ conditionals, right? */
4625 return ( VBOXVMM_R0_HMSVM_VMEXIT_ENABLED_RAW() /* expensive too due to context */
4626 | VBOXVMM_XCPT_DE_ENABLED_RAW()
4627 | VBOXVMM_XCPT_DB_ENABLED_RAW()
4628 | VBOXVMM_XCPT_BP_ENABLED_RAW()
4629 | VBOXVMM_XCPT_OF_ENABLED_RAW()
4630 | VBOXVMM_XCPT_BR_ENABLED_RAW()
4631 | VBOXVMM_XCPT_UD_ENABLED_RAW()
4632 | VBOXVMM_XCPT_NM_ENABLED_RAW()
4633 | VBOXVMM_XCPT_DF_ENABLED_RAW()
4634 | VBOXVMM_XCPT_TS_ENABLED_RAW()
4635 | VBOXVMM_XCPT_NP_ENABLED_RAW()
4636 | VBOXVMM_XCPT_SS_ENABLED_RAW()
4637 | VBOXVMM_XCPT_GP_ENABLED_RAW()
4638 | VBOXVMM_XCPT_PF_ENABLED_RAW()
4639 | VBOXVMM_XCPT_MF_ENABLED_RAW()
4640 | VBOXVMM_XCPT_AC_ENABLED_RAW()
4641 | VBOXVMM_XCPT_XF_ENABLED_RAW()
4642 | VBOXVMM_XCPT_VE_ENABLED_RAW()
4643 | VBOXVMM_XCPT_SX_ENABLED_RAW()
4644 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
4645 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
4646 ) != 0
4647 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
4648 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
4649 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
4650 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
4651 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
4652 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
4653 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
4654 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
4655 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
4656 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
4657 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
4658 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
4659 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
4660 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
4661 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
4662 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
4663 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
4664 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
4665 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
4666 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
4667 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
4668 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
4669 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
4670 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
4671 | VBOXVMM_INSTR_STR_ENABLED_RAW()
4672 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
4673 //| VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
4674 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
4675 //| VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
4676 //| VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
4677 //| VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
4678 //| VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
4679 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
4680 | VBOXVMM_INSTR_SVM_VMRUN_ENABLED_RAW()
4681 | VBOXVMM_INSTR_SVM_VMLOAD_ENABLED_RAW()
4682 | VBOXVMM_INSTR_SVM_VMSAVE_ENABLED_RAW()
4683 | VBOXVMM_INSTR_SVM_STGI_ENABLED_RAW()
4684 | VBOXVMM_INSTR_SVM_CLGI_ENABLED_RAW()
4685 ) != 0
4686 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
4687 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
4688 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
4689 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
4690 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
4691 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
4692 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
4693 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
4694 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
4695 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
4696 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
4697 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
4698 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
4699 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
4700 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
4701 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
4702 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
4703 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
4704 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
4705 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
4706 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
4707 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
4708 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
4709 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
4710 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
4711 | VBOXVMM_EXIT_STR_ENABLED_RAW()
4712 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
4713 //| VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
4714 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
4715 //| VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
4716 //| VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
4717 //| VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
4718 //| VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
4719 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
4720 | VBOXVMM_EXIT_SVM_VMRUN_ENABLED_RAW()
4721 | VBOXVMM_EXIT_SVM_VMLOAD_ENABLED_RAW()
4722 | VBOXVMM_EXIT_SVM_VMSAVE_ENABLED_RAW()
4723 | VBOXVMM_EXIT_SVM_STGI_ENABLED_RAW()
4724 | VBOXVMM_EXIT_SVM_CLGI_ENABLED_RAW()
4725 ) != 0;
4726}
4727
4728
4729/**
4730 * Runs the guest code using AMD-V.
4731 *
4732 * @returns Strict VBox status code.
4733 * @param pVCpu The cross context virtual CPU structure.
4734 */
4735VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPUCC pVCpu)
4736{
4737 AssertPtr(pVCpu);
4738 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4739 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4740 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4741 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4742
4743 uint32_t cLoops = 0;
4744 VBOXSTRICTRC rc;
4745 for (;;)
4746 {
4747#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4748 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
4749#else
4750 NOREF(pCtx);
4751 bool const fInNestedGuestMode = false;
4752#endif
4753 if (!fInNestedGuestMode)
4754 {
4755 if ( !pVCpu->hm.s.fUseDebugLoop
4756 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0SvmAnyExpensiveProbesEnabled())
4757 && !DBGFIsStepping(pVCpu)
4758 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
4759 rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
4760 else
4761 rc = hmR0SvmRunGuestCodeDebug(pVCpu, &cLoops);
4762 }
4763#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4764 else
4765 rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops);
4766
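        /* World switches between the two inner loops: VINF_SVM_VMRUN means the outer guest
           just executed VMRUN, so continue with the nested-guest loop; VINF_SVM_VMEXIT means
           the nested-guest performed a #VMEXIT back to the outer guest, so continue with the
           regular loop. */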
4767 if (rc == VINF_SVM_VMRUN)
4768 {
4769 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4770 continue;
4771 }
4772 if (rc == VINF_SVM_VMEXIT)
4773 {
4774 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4775 continue;
4776 }
4777#endif
4778 break;
4779 }
4780
4781 /* Fixup error codes. */
4782 if (rc == VERR_EM_INTERPRETER)
4783 rc = VINF_EM_RAW_EMULATE_INSTR;
4784 else if (rc == VINF_EM_RESET)
4785 rc = VINF_EM_TRIPLE_FAULT;
4786
4787 /* Prepare to return to ring-3. This will remove longjmp notifications. */
4788 rc = hmR0SvmExitToRing3(pVCpu, rc);
4789 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4790 Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
4791 return rc;
4792}
4793
4794#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4795
4796/**
4797 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
4798 *
4799 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
4800 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO.
4801 */
4802static bool hmR0SvmIsIoInterceptSet(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
4803{
4804 const uint16_t u16Port = pIoExitInfo->n.u16Port;
4805 const SVMIOIOTYPE enmIoType = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
4806 const uint8_t cbReg = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
4807 const uint8_t cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
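    /* The exit info encodes the operand size directly in bytes (1, 2 or 4), while the address
       size is encoded as 1/2/4 for 16/32/64-bit addressing, hence the shift by 4 above.
       E.g. a 32-bit "in eax, dx" with 32-bit addressing yields cbReg=4 and cAddrSizeBits=32. */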
4808 const uint8_t iEffSeg = pIoExitInfo->n.u3Seg;
4809 const bool fRep = pIoExitInfo->n.u1Rep;
4810 const bool fStrIo = pIoExitInfo->n.u1Str;
4811
4812 return CPUMIsSvmIoInterceptSet(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
4813 NULL /* pIoExitInfo */);
4814}
4815
4816
4817/**
4818 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
4819 * SVM_EXIT_INVALID).
4820 *
4821 * @returns VBox status code (informational status codes included).
4822 * @param pVCpu The cross context virtual CPU structure.
4823 * @param pSvmTransient Pointer to the SVM transient structure.
4824 */
4825static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4826{
4827 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
4828 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
4829 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
4830
4831 /*
4832 * We import the complete state here because we use separate VMCBs for the guest and the
4833     * nested-guest, and the guest's VMCB is used after the #VMEXIT. We could only save/restore
4834     * just the #VMEXIT-specific state if we were using the same VMCB for both the guest and nested-guest.
4835 */
4836#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4837 do { \
4838 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
4839 return IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
4840 } while (0)
4841
4842 /*
4843 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the
4844 * nested-guest. If it isn't, it should be handled by the (outer) guest.
4845 */
4846 PSVMVMCB pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb;
4847 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4848 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4849 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
4850 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1;
4851 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;
4852
4853 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
4854 switch (uExitCode)
4855 {
4856 case SVM_EXIT_CPUID:
4857 {
4858 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
4859 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4860 return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
4861 }
4862
4863 case SVM_EXIT_RDTSC:
4864 {
4865 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
4866 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4867 return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
4868 }
4869
4870 case SVM_EXIT_RDTSCP:
4871 {
4872 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
4873 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4874 return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
4875 }
4876
4877 case SVM_EXIT_MONITOR:
4878 {
4879 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
4880 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4881 return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
4882 }
4883
4884 case SVM_EXIT_MWAIT:
4885 {
4886 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
4887 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4888 return hmR0SvmExitMwait(pVCpu, pSvmTransient);
4889 }
4890
4891 case SVM_EXIT_HLT:
4892 {
4893 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
4894 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4895 return hmR0SvmExitHlt(pVCpu, pSvmTransient);
4896 }
4897
4898 case SVM_EXIT_MSR:
4899 {
4900 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
4901 {
4902 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
4903 uint16_t offMsrpm;
4904 uint8_t uMsrpmBit;
4905 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
4906 if (RT_SUCCESS(rc))
4907 {
4908 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
4909 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
4910
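                /* Two MSRPM bits cover each MSR: the even bit intercepts reads and the odd bit
                   immediately after it intercepts writes (e.g. uMsrpmBit=4 -> bits 4 and 5 of
                   the byte at offMsrpm). */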
4911 uint8_t const * const pbMsrBitmap = &pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm];
4912 bool const fInterceptRead = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
4913 bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
4914
4915 if ( (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4916 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
4917 {
4918 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4919 }
4920 }
4921 else
4922 {
4923 /*
4924 * MSRs not covered by the MSRPM automatically cause an #VMEXIT.
4925 * See AMD-V spec. "15.11 MSR Intercepts".
4926 */
4927 Assert(rc == VERR_OUT_OF_RANGE);
4928 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4929 }
4930 }
4931 return hmR0SvmExitMsr(pVCpu, pSvmTransient);
4932 }
4933
4934 case SVM_EXIT_IOIO:
4935 {
4936 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
4937 {
4938 SVMIOIOEXITINFO IoExitInfo;
4939 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
4940 bool const fIntercept = hmR0SvmIsIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, &IoExitInfo);
4941 if (fIntercept)
4942 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4943 }
4944 return hmR0SvmExitIOInstr(pVCpu, pSvmTransient);
4945 }
4946
4947 case SVM_EXIT_XCPT_PF:
4948 {
4949 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4950 if (pVM->hmr0.s.fNestedPaging)
4951 {
4952 uint32_t const u32ErrCode = pVmcbNstGstCtrl->u64ExitInfo1;
4953 uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
4954
4955 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
4956 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
4957 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
4958
4959 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
4960 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
4961 hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
4962 return VINF_SUCCESS;
4963 }
4964 return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
4965 }
4966
4967 case SVM_EXIT_XCPT_UD:
4968 {
4969 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
4970 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4971 hmR0SvmSetPendingXcptUD(pVCpu);
4972 return VINF_SUCCESS;
4973 }
4974
4975 case SVM_EXIT_XCPT_MF:
4976 {
4977 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
4978 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4979 return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
4980 }
4981
4982 case SVM_EXIT_XCPT_DB:
4983 {
4984 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
4985 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4986 return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
4987 }
4988
4989 case SVM_EXIT_XCPT_AC:
4990 {
4991 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
4992 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4993 return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
4994 }
4995
4996 case SVM_EXIT_XCPT_BP:
4997 {
4998 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
4999 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5000 return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
5001 }
5002
5003 case SVM_EXIT_READ_CR0:
5004 case SVM_EXIT_READ_CR3:
5005 case SVM_EXIT_READ_CR4:
5006 {
5007 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
5008 if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
5009 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5010 return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
5011 }
5012
5013 case SVM_EXIT_CR0_SEL_WRITE:
5014 {
5015 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5016 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5017 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5018 }
5019
5020 case SVM_EXIT_WRITE_CR0:
5021 case SVM_EXIT_WRITE_CR3:
5022 case SVM_EXIT_WRITE_CR4:
5023 case SVM_EXIT_WRITE_CR8: /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */
5024 {
5025 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5026 Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
5027
5028 if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
5029 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5030 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5031 }
5032
5033 case SVM_EXIT_PAUSE:
5034 {
5035 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
5036 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5037 return hmR0SvmExitPause(pVCpu, pSvmTransient);
5038 }
5039
5040 case SVM_EXIT_VINTR:
5041 {
5042 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
5043 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5044 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5045 }
5046
5047 case SVM_EXIT_INTR:
5048 case SVM_EXIT_NMI:
5049 case SVM_EXIT_SMI:
5050 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5051 {
5052 /*
5053         * We shouldn't direct physical interrupts, NMIs or SMIs to the nested-guest.
5054         *
5055         * Although we don't intercept SMIs, the nested-guest might. Therefore, we might
5056         * get an SMI #VMEXIT here, so we simply ignore it rather than cause a corresponding
5057         * nested-guest #VMEXIT.
5058 *
5059 * We shall import the complete state here as we may cause #VMEXITs from ring-3
5060 * while trying to inject interrupts, see comment at the top of this function.
5061 */
5062 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_ALL);
5063 return hmR0SvmExitIntr(pVCpu, pSvmTransient);
5064 }
5065
5066 case SVM_EXIT_FERR_FREEZE:
5067 {
5068 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
5069 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5070 return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
5071 }
5072
5073 case SVM_EXIT_INVLPG:
5074 {
5075 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
5076 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5077 return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
5078 }
5079
5080 case SVM_EXIT_WBINVD:
5081 {
5082 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
5083 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5084 return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
5085 }
5086
5087 case SVM_EXIT_INVD:
5088 {
5089 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
5090 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5091 return hmR0SvmExitInvd(pVCpu, pSvmTransient);
5092 }
5093
5094 case SVM_EXIT_RDPMC:
5095 {
5096 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
5097 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5098 return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
5099 }
5100
5101 default:
5102 {
5103 switch (uExitCode)
5104 {
5105 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5106 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5107 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5108 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5109 {
5110 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
5111 if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
5112 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5113 return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
5114 }
5115
5116 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5117 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5118 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5119 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5120 {
5121 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
5122 if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
5123 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5124 return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
5125 }
5126
5127 case SVM_EXIT_XCPT_DE:
5128 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5129 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5130 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5131 case SVM_EXIT_XCPT_OF:
5132 case SVM_EXIT_XCPT_BR:
5133 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5134 case SVM_EXIT_XCPT_NM:
5135 case SVM_EXIT_XCPT_DF:
5136 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5137 case SVM_EXIT_XCPT_TS:
5138 case SVM_EXIT_XCPT_NP:
5139 case SVM_EXIT_XCPT_SS:
5140 case SVM_EXIT_XCPT_GP:
5141 /* SVM_EXIT_XCPT_PF: */ /* Handled above. */
5142 case SVM_EXIT_XCPT_15: /* Reserved. */
5143 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5144 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5145 case SVM_EXIT_XCPT_MC:
5146 case SVM_EXIT_XCPT_XF:
5147 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5148 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5149 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5150 {
5151 uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
5152 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
5153 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5154 return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
5155 }
5156
5157 case SVM_EXIT_XSETBV:
5158 {
5159 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
5160 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5161 return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
5162 }
5163
5164 case SVM_EXIT_TASK_SWITCH:
5165 {
5166 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
5167 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5168 return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
5169 }
5170
5171 case SVM_EXIT_IRET:
5172 {
5173 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
5174 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5175 return hmR0SvmExitIret(pVCpu, pSvmTransient);
5176 }
5177
5178 case SVM_EXIT_SHUTDOWN:
5179 {
5180 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
5181 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5182 return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
5183 }
5184
5185 case SVM_EXIT_VMMCALL:
5186 {
5187 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
5188 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5189 return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
5190 }
5191
5192 case SVM_EXIT_CLGI:
5193 {
5194 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
5195 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5196 return hmR0SvmExitClgi(pVCpu, pSvmTransient);
5197 }
5198
5199 case SVM_EXIT_STGI:
5200 {
5201 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
5202 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5203 return hmR0SvmExitStgi(pVCpu, pSvmTransient);
5204 }
5205
5206 case SVM_EXIT_VMLOAD:
5207 {
5208 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
5209 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5210 return hmR0SvmExitVmload(pVCpu, pSvmTransient);
5211 }
5212
5213 case SVM_EXIT_VMSAVE:
5214 {
5215 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
5216 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5217 return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
5218 }
5219
5220 case SVM_EXIT_INVLPGA:
5221 {
5222 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
5223 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5224 return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
5225 }
5226
5227 case SVM_EXIT_VMRUN:
5228 {
5229 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
5230 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5231 return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
5232 }
5233
5234 case SVM_EXIT_RSM:
5235 {
5236 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
5237 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5238 hmR0SvmSetPendingXcptUD(pVCpu);
5239 return VINF_SUCCESS;
5240 }
5241
5242 case SVM_EXIT_SKINIT:
5243 {
5244 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
5245 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5246 hmR0SvmSetPendingXcptUD(pVCpu);
5247 return VINF_SUCCESS;
5248 }
5249
5250 case SVM_EXIT_NPF:
5251 {
5252 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
5253 return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
5254 }
5255
5256 case SVM_EXIT_INIT: /* We shouldn't get INIT signals while executing a nested-guest. */
5257 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5258
5259 default:
5260 {
5261 AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
5262 pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
5263 return VERR_SVM_UNKNOWN_EXIT;
5264 }
5265 }
5266 }
5267 }
5268 /* not reached */
5269
5270# undef NST_GST_VMEXIT_CALL_RET
5271}
5272
5273#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5274
5275/** @def VMEXIT_CALL_RET
5276 * Used by hmR0SvmHandleExit and hmR0SvmDebugHandleExit
5277 */
5278#ifdef DEBUG_ramshankar
5279# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
5280 do { \
5281 if ((a_fDbg) == 1) \
5282 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
5283 int rc = a_CallExpr; \
5284 if ((a_fDbg) == 1) \
5285 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
5286 return rc; \
5287 } while (0)
5288#else
5289# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
5290#endif
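/* Note: the DEBUG_ramshankar variant of VMEXIT_CALL_RET imports the complete guest state before
   invoking the handler and marks all guest state as changed afterwards, trading performance for
   simpler state handling while debugging the exit handlers. */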
5291
5292/**
5293 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
5294 *
5295 * @returns Strict VBox status code (informational status codes included).
5296 * @param pVCpu The cross context virtual CPU structure.
5297 * @param pSvmTransient Pointer to the SVM transient structure.
5298 */
5299static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5300{
5301 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
5302 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
5303
5304 /*
5305 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
5306 * for most guests under normal workloads (for some definition of "normal").
5307 */
5308 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
5309 switch (uExitCode)
5310 {
5311 case SVM_EXIT_NPF: VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
5312 case SVM_EXIT_IOIO: VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
5313 case SVM_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
5314 case SVM_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
5315 case SVM_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
5316 case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
5317 case SVM_EXIT_MSR: VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
5318 case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
5319 case SVM_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
5320 case SVM_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));
5321
5322 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5323 case SVM_EXIT_INTR:
5324 case SVM_EXIT_NMI: VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));
5325
5326 case SVM_EXIT_READ_CR0:
5327 case SVM_EXIT_READ_CR3:
5328 case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));
5329
5330 case SVM_EXIT_CR0_SEL_WRITE:
5331 case SVM_EXIT_WRITE_CR0:
5332 case SVM_EXIT_WRITE_CR3:
5333 case SVM_EXIT_WRITE_CR4:
5334 case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));
5335
5336 case SVM_EXIT_VINTR: VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
5337 case SVM_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
5338 case SVM_EXIT_VMMCALL: VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
5339 case SVM_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
5340 case SVM_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
5341 case SVM_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
5342 case SVM_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
5343 case SVM_EXIT_IRET: VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
5344 case SVM_EXIT_XCPT_DE: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDE(pVCpu, pSvmTransient));
5345 case SVM_EXIT_XCPT_UD: VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
5346 case SVM_EXIT_XCPT_MF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
5347 case SVM_EXIT_XCPT_DB: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
5348 case SVM_EXIT_XCPT_AC: VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
5349 case SVM_EXIT_XCPT_BP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
5350 case SVM_EXIT_XCPT_GP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptGP(pVCpu, pSvmTransient));
5351 case SVM_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
5352 case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));
5353
5354 default:
5355 {
5356 switch (pSvmTransient->u64ExitCode)
5357 {
5358 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5359 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5360 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5361 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5362 VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));
5363
5364 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5365 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5366 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5367 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5368 VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));
5369
5370 case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
5371 case SVM_EXIT_SHUTDOWN: VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));
5372
5373 case SVM_EXIT_SMI:
5374 case SVM_EXIT_INIT:
5375 {
5376 /*
5377 * We don't intercept SMIs. As for INIT signals, it really shouldn't ever happen here.
5378 * If it ever does, we want to know about it so log the exit code and bail.
5379 */
5380 VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
5381 }
5382
5383#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5384 case SVM_EXIT_CLGI: VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
5385 case SVM_EXIT_STGI: VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
5386 case SVM_EXIT_VMLOAD: VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
5387 case SVM_EXIT_VMSAVE: VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
5388 case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
5389 case SVM_EXIT_VMRUN: VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
5390#else
5391 case SVM_EXIT_CLGI:
5392 case SVM_EXIT_STGI:
5393 case SVM_EXIT_VMLOAD:
5394 case SVM_EXIT_VMSAVE:
5395 case SVM_EXIT_INVLPGA:
5396 case SVM_EXIT_VMRUN:
5397#endif
5398 case SVM_EXIT_RSM:
5399 case SVM_EXIT_SKINIT:
5400 {
5401 hmR0SvmSetPendingXcptUD(pVCpu);
5402 return VINF_SUCCESS;
5403 }
5404
5405 /*
5406 * The remaining should only be possible when debugging or dtracing.
5407 */
5408 case SVM_EXIT_XCPT_DE:
5409 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5410 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5411 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5412 case SVM_EXIT_XCPT_OF:
5413 case SVM_EXIT_XCPT_BR:
5414 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5415 case SVM_EXIT_XCPT_NM:
5416 case SVM_EXIT_XCPT_DF:
5417 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5418 case SVM_EXIT_XCPT_TS:
5419 case SVM_EXIT_XCPT_NP:
5420 case SVM_EXIT_XCPT_SS:
5421 /* SVM_EXIT_XCPT_GP: */ /* Handled above. */
5422 /* SVM_EXIT_XCPT_PF: */
5423 case SVM_EXIT_XCPT_15: /* Reserved. */
5424 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5425 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5426 case SVM_EXIT_XCPT_MC:
5427 case SVM_EXIT_XCPT_XF:
5428 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5429 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5430 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5431 VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
5432
5433 case SVM_EXIT_SWINT: VMEXIT_CALL_RET(0, hmR0SvmExitSwInt(pVCpu, pSvmTransient));
5434 case SVM_EXIT_TR_READ: VMEXIT_CALL_RET(0, hmR0SvmExitTrRead(pVCpu, pSvmTransient));
5435 case SVM_EXIT_TR_WRITE: VMEXIT_CALL_RET(0, hmR0SvmExitTrWrite(pVCpu, pSvmTransient)); /* Also OS/2 TLB workaround. */
5436
5437 default:
5438 {
5439 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
5440 pVCpu->hm.s.u32HMError = uExitCode;
5441 return VERR_SVM_UNKNOWN_EXIT;
5442 }
5443 }
5444 }
5445 }
5446 /* not reached */
5447}
5448
5449
5450/** @name Execution loop for single stepping, DBGF events and expensive Dtrace probes.
5451 *
5452 * The following few functions and associated structure contain the bloat
5453 * necessary for providing detailed debug events and dtrace probes as well as
5454 * reliable host-side single stepping. This works on the principle of
5455 * "subclassing" the normal execution loop and workers. We replace the loop
5456 * method completely and override selected helpers to add necessary adjustments
5457 * to their core operation.
5458 *
5459 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
5460 * any performance for debug and analysis features.
5461 *
5462 * @{
5463 */
5464
5465/**
5466 * Transient per-VCPU debug state of the VMCB and related info we save/restore in
5467 * the debug run loop.
5468 */
5469typedef struct SVMRUNDBGSTATE
5470{
5471 /** The initial SVMVMCBCTRL::u64InterceptCtrl value (helps with restore). */
5472 uint64_t bmInterceptInitial;
5473 /** The initial SVMVMCBCTRL::u32InterceptXcpt value (helps with restore). */
5474 uint32_t bmXcptInitial;
5475 /** The initial SVMVMCBCTRL::u16InterceptRdCRx value (helps with restore). */
5476 uint16_t bmInterceptRdCRxInitial;
5477 /** The initial SVMVMCBCTRL::u16InterceptWrCRx value (helps with restore). */
5478 uint16_t bmInterceptWrCRxInitial;
5479 /** The initial SVMVMCBCTRL::u16InterceptRdDRx value (helps with restore). */
5480 uint16_t bmInterceptRdDRxInitial;
5481 /** The initial SVMVMCBCTRL::u16InterceptWrDRx value (helps with restore). */
5482 uint16_t bmInterceptWrDRxInitial;
5483
5484 /** Whether we've actually modified the intercept control qword. */
5485 bool fModifiedInterceptCtrl : 1;
5486 /** Whether we've actually modified the exception bitmap. */
5487 bool fModifiedXcptBitmap : 1;
5488 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdCRx. */
5489 bool fModifiedInterceptRdCRx : 1;
5490 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrCRx. */
5491 bool fModifiedInterceptWrCRx : 1;
5492 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdDRx. */
5493 bool fModifiedInterceptRdDRx : 1;
5494 /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrDRx. */
5495 bool fModifiedInterceptWrDRx : 1;
5496
5497 /** The CS we started executing with. */
5498 uint16_t uCsStart;
5499 /** The RIP we started executing at. This is for detecting that we stepped. */
5500 uint64_t uRipStart;
5501
5502 /** The sequence number of the Dtrace provider settings the state was
5503 * configured against. */
5504 uint32_t uDtraceSettingsSeqNo;
5505 /** Extra stuff we need in SVMVMCBCTRL::u32InterceptXcpt. */
5506 uint32_t bmXcptExtra;
5507 /** Extra stuff we need in SVMVMCBCTRL::u64InterceptCtrl. */
5508 uint64_t bmInterceptExtra;
5509 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdCRx. */
5510 uint16_t bmInterceptRdCRxExtra;
5511 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrCRx. */
5512 uint16_t bmInterceptWrCRxExtra;
5513 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdDRx. */
5514 uint16_t bmInterceptRdDRxExtra;
5515 /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrDRx. */
5516 uint16_t bmInterceptWrDRxExtra;
5517 /** VM-exits to check (one bit per VM-exit). */
5518 uint32_t bmExitsToCheck[33];
5519} SVMRUNDBGSTATE;
5520AssertCompileMemberSize(SVMRUNDBGSTATE, bmExitsToCheck, (SVM_EXIT_MAX + 1 + 31) / 32 * 4);
5521typedef SVMRUNDBGSTATE *PSVMRUNDBGSTATE;
5522
5523
5524/**
5525 * Initializes the SVMRUNDBGSTATE structure.
5526 *
5527 * @param pVCpu The cross context virtual CPU structure of the
5528 * calling EMT.
5529 * @param pSvmTransient The SVM-transient structure.
5530 * @param pDbgState The debug state to initialize.
5531 */
5532static void hmR0SvmRunDebugStateInit(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5533{
5534 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
5535 pDbgState->bmInterceptInitial = pVmcb->ctrl.u64InterceptCtrl;
5536 pDbgState->bmXcptInitial = pVmcb->ctrl.u32InterceptXcpt;
5537 pDbgState->bmInterceptRdCRxInitial = pVmcb->ctrl.u16InterceptRdCRx;
5538 pDbgState->bmInterceptWrCRxInitial = pVmcb->ctrl.u16InterceptWrCRx;
5539 pDbgState->bmInterceptRdDRxInitial = pVmcb->ctrl.u16InterceptRdDRx;
5540 pDbgState->bmInterceptWrDRxInitial = pVmcb->ctrl.u16InterceptWrDRx;
5541
5542 pDbgState->fModifiedInterceptCtrl = false;
5543 pDbgState->fModifiedXcptBitmap = false;
5544 pDbgState->fModifiedInterceptRdCRx = false;
5545 pDbgState->fModifiedInterceptWrCRx = false;
5546 pDbgState->fModifiedInterceptRdDRx = false;
5547 pDbgState->fModifiedInterceptWrDRx = false;
5548
5549 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
5550 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
5551
5552 /* We don't really need to zero these. */
5553 pDbgState->bmInterceptExtra = 0;
5554 pDbgState->bmXcptExtra = 0;
5555 pDbgState->bmInterceptRdCRxExtra = 0;
5556 pDbgState->bmInterceptWrCRxExtra = 0;
5557 pDbgState->bmInterceptRdDRxExtra = 0;
5558 pDbgState->bmInterceptWrDRxExtra = 0;
5559}
5560
5561
5562/**
5563 * Updates the VMCB fields with changes requested by @a pDbgState.
5564 *
5565 * This is performed after hmR0SvmPreRunGuestDebugStateUpdate as well
5566 * immediately before executing guest code, i.e. when interrupts are disabled.
5567 * We don't check status codes here as we cannot easily assert or return in the
5568 * latter case.
5569 *
5570 * @param pSvmTransient The SVM-transient structure.
5571 * @param pDbgState The debug state.
5572 */
5573static void hmR0SvmPreRunGuestDebugStateApply(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5574{
5575 /*
5576     * Ensure desired flags in the VMCB control fields are set.
5577 */
5578 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
5579#define ADD_EXTRA_INTERCEPTS(a_VmcbCtrlField, a_bmExtra, a_fModified) do { \
5580 if ((pVmcb->ctrl. a_VmcbCtrlField & (a_bmExtra)) != (a_bmExtra)) \
5581 { \
5582 pVmcb->ctrl. a_VmcbCtrlField |= (a_bmExtra); \
5583 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5584 Log6Func((#a_VmcbCtrlField ": %#RX64\n", pVmcb->ctrl. a_VmcbCtrlField)); \
5585 (a_fModified) = true; \
5586 } \
5587 } while (0)
5588 ADD_EXTRA_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptExtra, pDbgState->fModifiedInterceptCtrl);
5589 ADD_EXTRA_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptExtra, pDbgState->fModifiedXcptBitmap);
5590 ADD_EXTRA_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxExtra, pDbgState->fModifiedInterceptRdCRx);
5591 ADD_EXTRA_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxExtra, pDbgState->fModifiedInterceptWrCRx);
5592 ADD_EXTRA_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxExtra, pDbgState->fModifiedInterceptRdDRx);
5593 ADD_EXTRA_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxExtra, pDbgState->fModifiedInterceptWrDRx);
5594#undef ADD_EXTRA_INTERCEPTS
5595}
5596
5597
5598/**
5599 * Restores VMCB fields that were changed by hmR0SvmPreRunGuestDebugStateApply
5600 * for re-entry next time around.
5601 *
5602 * @param pSvmTransient The SVM-transient structure.
5603 * @param pDbgState The debug state.
5604 */
5605static void hmR0SvmRunDebugStateRevert(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5606{
5607 /*
5608 * Restore VM-exit control settings as we may not reenter this function the
5609 * next time around.
5610 */
5611 PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
5612
5613#define RESTORE_INTERCEPTS(a_VmcbCtrlField, a_bmInitial, a_fModified) do { \
5614 if ((a_fModified)) \
5615 { \
5616 pVmcb->ctrl. a_VmcbCtrlField = (a_bmInitial); \
5617 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
5618 } \
5619 } while (0)
5620 RESTORE_INTERCEPTS(u64InterceptCtrl, pDbgState->bmInterceptInitial, pDbgState->fModifiedInterceptCtrl);
5621 RESTORE_INTERCEPTS(u32InterceptXcpt, pDbgState->bmXcptInitial, pDbgState->fModifiedXcptBitmap);
5622 RESTORE_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxInitial, pDbgState->fModifiedInterceptRdCRx);
5623 RESTORE_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxInitial, pDbgState->fModifiedInterceptWrCRx);
5624 RESTORE_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxInitial, pDbgState->fModifiedInterceptRdDRx);
5625 RESTORE_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxInitial, pDbgState->fModifiedInterceptWrDRx);
5626#undef RESTORE_INTERCEPTS
5627}
5628
5629
5630/**
5631 * Configures VM-exit controls for current DBGF and DTrace settings.
5632 *
5633 * This updates @a pDbgState and the VMCB execution control fields (in the debug
5634 * state) to reflect the necessary VM-exits demanded by DBGF and DTrace.
5635 *
5636 * @param pVCpu The cross context virtual CPU structure.
5637 * @param pSvmTransient The SVM-transient structure. May update
5638 * fUpdatedTscOffsettingAndPreemptTimer.
5639 * @param pDbgState The debug state.
5640 */
5641static void hmR0SvmPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
5642{
5643 /*
5644 * Take down the dtrace serial number so we can spot changes.
5645 */
5646 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
5647 ASMCompilerBarrier();
5648
5649 /*
5650 * Clear data members that we'll be rebuilding here.
5651 */
5652 pDbgState->bmXcptExtra = 0;
5653 pDbgState->bmInterceptExtra = 0;
5654 pDbgState->bmInterceptRdCRxExtra = 0;
5655 pDbgState->bmInterceptWrCRxExtra = 0;
5656 pDbgState->bmInterceptRdDRxExtra = 0;
5657 pDbgState->bmInterceptWrDRxExtra = 0;
5658 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
5659 pDbgState->bmExitsToCheck[i] = 0;
5660
5661 /*
5662 * Software interrupts (INT XXh)
5663 */
5664 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5665 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
5666 || VBOXVMM_INT_SOFTWARE_ENABLED())
5667 {
5668 pDbgState->bmInterceptExtra |= SVM_CTRL_INTERCEPT_INTN;
5669 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SWINT);
5670 }
5671
5672 /*
5673 * INT3 breakpoints - triggered by #BP exceptions.
5674 */
5675 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
5676 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
5677
5678 /*
5679 * Exception bitmap and XCPT events+probes.
5680 */
5681#define SET_XCPT(a_iXcpt) do { \
5682 pDbgState->bmXcptExtra |= RT_BIT_32(a_iXcpt); \
5683 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_XCPT_0 + (a_iXcpt)); \
5684 } while (0)
5685
5686 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
5687 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
5688 SET_XCPT(iXcpt);
5689
5690 if (VBOXVMM_XCPT_DE_ENABLED()) SET_XCPT(X86_XCPT_DE);
5691 if (VBOXVMM_XCPT_DB_ENABLED()) SET_XCPT(X86_XCPT_DB);
5692 if (VBOXVMM_XCPT_BP_ENABLED()) SET_XCPT(X86_XCPT_BP);
5693 if (VBOXVMM_XCPT_OF_ENABLED()) SET_XCPT(X86_XCPT_OF);
5694 if (VBOXVMM_XCPT_BR_ENABLED()) SET_XCPT(X86_XCPT_BR);
5695 if (VBOXVMM_XCPT_UD_ENABLED()) SET_XCPT(X86_XCPT_UD);
5696 if (VBOXVMM_XCPT_NM_ENABLED()) SET_XCPT(X86_XCPT_NM);
5697 if (VBOXVMM_XCPT_DF_ENABLED()) SET_XCPT(X86_XCPT_DF);
5698 if (VBOXVMM_XCPT_TS_ENABLED()) SET_XCPT(X86_XCPT_TS);
5699 if (VBOXVMM_XCPT_NP_ENABLED()) SET_XCPT(X86_XCPT_NP);
5700 if (VBOXVMM_XCPT_SS_ENABLED()) SET_XCPT(X86_XCPT_SS);
5701 if (VBOXVMM_XCPT_GP_ENABLED()) SET_XCPT(X86_XCPT_GP);
5702 if (VBOXVMM_XCPT_PF_ENABLED()) SET_XCPT(X86_XCPT_PF);
5703 if (VBOXVMM_XCPT_MF_ENABLED()) SET_XCPT(X86_XCPT_MF);
5704 if (VBOXVMM_XCPT_AC_ENABLED()) SET_XCPT(X86_XCPT_AC);
5705 if (VBOXVMM_XCPT_XF_ENABLED()) SET_XCPT(X86_XCPT_XF);
5706 if (VBOXVMM_XCPT_VE_ENABLED()) SET_XCPT(X86_XCPT_VE);
5707 if (VBOXVMM_XCPT_SX_ENABLED()) SET_XCPT(X86_XCPT_SX);
5708
5709#undef SET_XCPT
5710
5711 /*
5712 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
5713 *
5714 * Note! This is the reverse of what hmR0SvmHandleExitDtraceEvents does.
5715 * So, when adding/changing/removing please don't forget to update it.
5716 *
5717 * Some of the macros are picking up local variables to save horizontal space,
5718 * (being able to see it in a table is the lesser evil here).
5719 */
5720#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
5721 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
5722 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
5723#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
5724 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5725 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5726 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5727 } else do { } while (0)
5728#define SET_INCP_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fInterceptCtrl) \
5729 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
5730 { \
5731 (pDbgState)->bmInterceptExtra |= (a_fInterceptCtrl); \
5732 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
5733 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
5734 } else do { } while (0)
5735
5736 /** @todo double check these */
5737    /** @todo Check what more AMD-V specific stuff we can intercept. */
5738 //SET_INCP_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH, SVM_CTRL_INTERCEPT_TASK_SWITCH);
5739 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, SVM_EXIT_TASK_SWITCH);
5740 SET_INCP_XBM_IF_EITHER_EN(INSTR_VMM_CALL, SVM_EXIT_VMMCALL, SVM_CTRL_INTERCEPT_VMMCALL);
5741 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, SVM_EXIT_VMMCALL);
5742 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMRUN, SVM_EXIT_VMRUN, SVM_CTRL_INTERCEPT_VMRUN);
5743 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMRUN, SVM_EXIT_VMRUN);
5744 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMLOAD, SVM_EXIT_VMLOAD, SVM_CTRL_INTERCEPT_VMLOAD);
5745 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMLOAD, SVM_EXIT_VMLOAD);
5746 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMSAVE, SVM_EXIT_VMSAVE, SVM_CTRL_INTERCEPT_VMSAVE);
5747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMSAVE, SVM_EXIT_VMSAVE);
5748 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_STGI, SVM_EXIT_STGI, SVM_CTRL_INTERCEPT_STGI);
5749 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_STGI, SVM_EXIT_STGI);
5750 SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_CLGI, SVM_EXIT_CLGI, SVM_CTRL_INTERCEPT_CLGI);
5751 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_CLGI, SVM_EXIT_CLGI);
5752
5753 SET_INCP_XBM_IF_EITHER_EN(INSTR_CPUID, SVM_EXIT_CPUID, SVM_CTRL_INTERCEPT_CPUID);
5754 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, SVM_EXIT_CPUID);
5755 SET_INCP_XBM_IF_EITHER_EN(INSTR_HALT, SVM_EXIT_HLT, SVM_CTRL_INTERCEPT_HLT);
5756 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, SVM_EXIT_HLT);
5757 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVD, SVM_EXIT_INVD, SVM_CTRL_INTERCEPT_INVD);
5758 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, SVM_EXIT_INVD);
5759 SET_INCP_XBM_IF_EITHER_EN(INSTR_INVLPG, SVM_EXIT_INVLPG, SVM_CTRL_INTERCEPT_INVLPG);
5760 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, SVM_EXIT_INVLPG);
5761 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDPMC, SVM_EXIT_RDPMC, SVM_CTRL_INTERCEPT_RDPMC);
5762 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, SVM_EXIT_RDPMC);
5763 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSC, SVM_EXIT_RDTSC, SVM_CTRL_INTERCEPT_RDTSC);
5764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, SVM_EXIT_RDTSC);
5765 SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSCP, SVM_EXIT_RDTSCP, SVM_CTRL_INTERCEPT_RDTSCP);
5766 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, SVM_EXIT_RDTSCP);
5767 SET_INCP_XBM_IF_EITHER_EN(INSTR_RSM, SVM_EXIT_RSM, SVM_CTRL_INTERCEPT_RSM);
5768 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, SVM_EXIT_RSM);
5769
5770 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
5771 pDbgState->bmInterceptRdCRxExtra = 0xffff;
5772 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_CRX_READ))
5773 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_CR0, SVM_EXIT_READ_CR15 + 1);
5774
5775 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
5776 pDbgState->bmInterceptWrCRxExtra = 0xffff;
5777 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
5778 {
5779 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_CR0, SVM_EXIT_WRITE_CR15 + 1);
5780 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_CR0_SEL_WRITE);
5781 }
5782
5783 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ))
5784 pDbgState->bmInterceptRdDRxExtra = 0xffff;
5785 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_DRX_READ))
5786 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_DR0, SVM_EXIT_READ_DR15 + 1);
5787
5788 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
5789 pDbgState->bmInterceptWrDRxExtra = 0xffff;
5790 if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_DRX_WRITE))
5791 ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_DR0, SVM_EXIT_WRITE_DR15 + 1);
5792
5793 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RDMSR, SVM_EXIT_MSR); /** @todo modify bitmap to intercept almost everything? (Clearing MSR_PROT just means no intercepts.) */
5794 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, SVM_EXIT_MSR);
5795 SET_ONLY_XBM_IF_EITHER_EN(INSTR_WRMSR, SVM_EXIT_MSR); /** @todo ditto */
5796 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, SVM_EXIT_MSR);
5797 SET_INCP_XBM_IF_EITHER_EN(INSTR_MWAIT, SVM_EXIT_MWAIT, SVM_CTRL_INTERCEPT_MWAIT);
5798 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, SVM_EXIT_MWAIT);
5799 if (ASMBitTest(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT))
5800 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT_ARMED);
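    /* MWAIT can also be reported as SVM_EXIT_MWAIT_ARMED when the monitor hardware is armed,
       so whenever MWAIT exits are of interest that exit code is checked as well. */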
5801 SET_INCP_XBM_IF_EITHER_EN(INSTR_MONITOR, SVM_EXIT_MONITOR, SVM_CTRL_INTERCEPT_MONITOR);
5802 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, SVM_EXIT_MONITOR);
5803 SET_INCP_XBM_IF_EITHER_EN(INSTR_PAUSE, SVM_EXIT_PAUSE, SVM_CTRL_INTERCEPT_PAUSE);
5804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, SVM_EXIT_PAUSE);
5805 SET_INCP_XBM_IF_EITHER_EN(INSTR_SIDT, SVM_EXIT_IDTR_READ, SVM_CTRL_INTERCEPT_IDTR_READS);
5806 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, SVM_EXIT_IDTR_READ);
5807 SET_INCP_XBM_IF_EITHER_EN(INSTR_LIDT, SVM_EXIT_IDTR_WRITE, SVM_CTRL_INTERCEPT_IDTR_WRITES);
5808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, SVM_EXIT_IDTR_WRITE);
5809 SET_INCP_XBM_IF_EITHER_EN(INSTR_SGDT, SVM_EXIT_GDTR_READ, SVM_CTRL_INTERCEPT_GDTR_READS);
5810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, SVM_EXIT_GDTR_READ);
5811 SET_INCP_XBM_IF_EITHER_EN(INSTR_LGDT, SVM_EXIT_GDTR_WRITE, SVM_CTRL_INTERCEPT_GDTR_WRITES);
5812 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, SVM_EXIT_GDTR_WRITE);
5813 SET_INCP_XBM_IF_EITHER_EN(INSTR_SLDT, SVM_EXIT_LDTR_READ, SVM_CTRL_INTERCEPT_LDTR_READS);
5814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, SVM_EXIT_LDTR_READ);
5815 SET_INCP_XBM_IF_EITHER_EN(INSTR_LLDT, SVM_EXIT_LDTR_WRITE, SVM_CTRL_INTERCEPT_LDTR_WRITES);
5816 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, SVM_EXIT_LDTR_WRITE);
5817 SET_INCP_XBM_IF_EITHER_EN(INSTR_STR, SVM_EXIT_TR_READ, SVM_CTRL_INTERCEPT_TR_READS);
5818 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, SVM_EXIT_TR_READ);
5819 SET_INCP_XBM_IF_EITHER_EN(INSTR_LTR, SVM_EXIT_TR_WRITE, SVM_CTRL_INTERCEPT_TR_WRITES);
5820 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, SVM_EXIT_TR_WRITE);
5821 SET_INCP_XBM_IF_EITHER_EN(INSTR_WBINVD, SVM_EXIT_WBINVD, SVM_CTRL_INTERCEPT_WBINVD);
5822 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, SVM_EXIT_WBINVD);
5823 SET_INCP_XBM_IF_EITHER_EN(INSTR_XSETBV, SVM_EXIT_XSETBV, SVM_CTRL_INTERCEPT_XSETBV);
5824 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, SVM_EXIT_XSETBV);
5825
5826 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_TRIPLE_FAULT))
5827 ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SHUTDOWN);
5828
5829#undef IS_EITHER_ENABLED
5830#undef SET_ONLY_XBM_IF_EITHER_EN
5831#undef SET_INCP_XBM_IF_EITHER_EN
5832
5833 /*
5834 * Sanitize the control stuff.
5835 */
5836 /** @todo filter out unsupported stuff? */
5837 if ( pVCpu->hmr0.s.fDebugWantRdTscExit
5838 != RT_BOOL(pDbgState->bmInterceptExtra & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
5839 {
5840 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
5841 /// @todo pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
5842 RT_NOREF(pSvmTransient);
5843 }
5844
5845 Log6(("HM: debug state: bmInterceptExtra=%#RX64 bmXcptExtra=%#RX32%s%s%s%s bmExitsToCheck=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
5846 pDbgState->bmInterceptExtra, pDbgState->bmXcptExtra,
5847 pDbgState->bmInterceptRdCRxExtra ? " rd-cr" : "",
5848 pDbgState->bmInterceptWrCRxExtra ? " wr-cr" : "",
5849 pDbgState->bmInterceptRdDRxExtra ? " rd-dr" : "",
5850 pDbgState->bmInterceptWrDRxExtra ? " wr-dr" : "",
5851 pDbgState->bmExitsToCheck[0],
5852 pDbgState->bmExitsToCheck[1],
5853 pDbgState->bmExitsToCheck[2],
5854 pDbgState->bmExitsToCheck[3],
5855 pDbgState->bmExitsToCheck[4]));
5856}
5857
5858
5859/**
5860 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
5861 * appropriate.
5862 *
5863 * The caller has checked the VM-exit against the SVMRUNDBGSTATE::bmExitsToCheck
5864 * bitmap.
5865 *
5866 * @returns Strict VBox status code (i.e. informational status codes too).
5867 * @param pVCpu The cross context virtual CPU structure.
5868 * @param pSvmTransient The SVM-transient structure.
5869 * @param uExitCode The VM-exit code.
5870 *
5871 * @remarks The name of this function is displayed by dtrace, so keep it short
5872 * and to the point. No longer than 33 chars long, please.
5873 */
5874static VBOXSTRICTRC hmR0SvmHandleExitDtraceEvents(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, uint64_t uExitCode)
5875{
5876 /*
5877 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
5878 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
5879 *
5880 * Note! This is the reverse operation of what hmR0SvmPreRunGuestDebugStateUpdate
5881 * does. Must add/change/remove both places. Same ordering, please.
5882 *
5883 * Added/removed events must also be reflected in the next section
5884 * where we dispatch dtrace events.
5885 */
5886 bool fDtrace1 = false;
5887 bool fDtrace2 = false;
5888 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
5889 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
5890 uint64_t uEventArg = 0;
5891#define SET_XCPT(a_XcptName) \
5892 do { \
5893 enmEvent2 = RT_CONCAT(DBGFEVENT_XCPT_, a_XcptName); \
5894 fDtrace2 = RT_CONCAT3(VBOXVMM_XCPT_, a_XcptName, _ENABLED)(); \
5895 } while (0)
5896#define SET_EXIT(a_EventSubName) \
5897 do { \
5898 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5899 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5900 } while (0)
5901#define SET_BOTH(a_EventSubName) \
5902 do { \
5903 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
5904 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
5905 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
5906 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
5907 } while (0)
5908 switch (uExitCode)
5909 {
5910 case SVM_EXIT_SWINT:
5911 enmEvent2 = DBGFEVENT_INTERRUPT_SOFTWARE;
5912 fDtrace2 = VBOXVMM_INT_SOFTWARE_ENABLED();
5913 uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1;
5914 break;
5915
5916 case SVM_EXIT_XCPT_DE: SET_XCPT(DE); break;
5917 case SVM_EXIT_XCPT_DB: SET_XCPT(DB); break;
5918 case SVM_EXIT_XCPT_BP: SET_XCPT(BP); break;
5919 case SVM_EXIT_XCPT_OF: SET_XCPT(OF); break;
5920 case SVM_EXIT_XCPT_BR: SET_XCPT(BR); break;
5921 case SVM_EXIT_XCPT_UD: SET_XCPT(UD); break;
5922 case SVM_EXIT_XCPT_NM: SET_XCPT(NM); break;
5923 case SVM_EXIT_XCPT_DF: SET_XCPT(DF); break;
5924 case SVM_EXIT_XCPT_TS: SET_XCPT(TS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5925 case SVM_EXIT_XCPT_NP: SET_XCPT(NP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5926 case SVM_EXIT_XCPT_SS: SET_XCPT(SS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5927 case SVM_EXIT_XCPT_GP: SET_XCPT(GP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5928 case SVM_EXIT_XCPT_PF: SET_XCPT(PF); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5929 case SVM_EXIT_XCPT_MF: SET_XCPT(MF); break;
5930 case SVM_EXIT_XCPT_AC: SET_XCPT(AC); break;
5931 case SVM_EXIT_XCPT_XF: SET_XCPT(XF); break;
5932 case SVM_EXIT_XCPT_VE: SET_XCPT(VE); break;
5933 case SVM_EXIT_XCPT_SX: SET_XCPT(SX); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
5934
5935 case SVM_EXIT_XCPT_2: enmEvent2 = DBGFEVENT_XCPT_02; break;
5936 case SVM_EXIT_XCPT_9: enmEvent2 = DBGFEVENT_XCPT_09; break;
5937 case SVM_EXIT_XCPT_15: enmEvent2 = DBGFEVENT_XCPT_0f; break;
5938 case SVM_EXIT_XCPT_18: enmEvent2 = DBGFEVENT_XCPT_MC; break;
5939 case SVM_EXIT_XCPT_21: enmEvent2 = DBGFEVENT_XCPT_15; break;
5940 case SVM_EXIT_XCPT_22: enmEvent2 = DBGFEVENT_XCPT_16; break;
5941 case SVM_EXIT_XCPT_23: enmEvent2 = DBGFEVENT_XCPT_17; break;
5942 case SVM_EXIT_XCPT_24: enmEvent2 = DBGFEVENT_XCPT_18; break;
5943 case SVM_EXIT_XCPT_25: enmEvent2 = DBGFEVENT_XCPT_19; break;
5944 case SVM_EXIT_XCPT_26: enmEvent2 = DBGFEVENT_XCPT_1a; break;
5945 case SVM_EXIT_XCPT_27: enmEvent2 = DBGFEVENT_XCPT_1b; break;
5946 case SVM_EXIT_XCPT_28: enmEvent2 = DBGFEVENT_XCPT_1c; break;
5947 case SVM_EXIT_XCPT_29: enmEvent2 = DBGFEVENT_XCPT_1d; break;
5948 case SVM_EXIT_XCPT_31: enmEvent2 = DBGFEVENT_XCPT_1f; break;
5949
5950 case SVM_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
5951 case SVM_EXIT_VMMCALL: SET_BOTH(VMM_CALL); break;
5952 case SVM_EXIT_VMRUN: SET_BOTH(SVM_VMRUN); break;
5953 case SVM_EXIT_VMLOAD: SET_BOTH(SVM_VMLOAD); break;
5954 case SVM_EXIT_VMSAVE: SET_BOTH(SVM_VMSAVE); break;
5955 case SVM_EXIT_STGI: SET_BOTH(SVM_STGI); break;
5956 case SVM_EXIT_CLGI: SET_BOTH(SVM_CLGI); break;
5957 case SVM_EXIT_CPUID: SET_BOTH(CPUID); break;
5958 case SVM_EXIT_HLT: SET_BOTH(HALT); break;
5959 case SVM_EXIT_INVD: SET_BOTH(INVD); break;
5960 case SVM_EXIT_INVLPG: SET_BOTH(INVLPG); break;
5961 case SVM_EXIT_RDPMC: SET_BOTH(RDPMC); break;
5962 case SVM_EXIT_RDTSC: SET_BOTH(RDTSC); break;
5963 case SVM_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
5964 case SVM_EXIT_RSM: SET_BOTH(RSM); break;
5965
5966 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
5967 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
5968 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
5969 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
5970 SET_BOTH(CRX_READ);
5971 uEventArg = uExitCode - SVM_EXIT_READ_CR0;
5972 break;
5973 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
5974 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
5975 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
5976 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
5977 case SVM_EXIT_CR0_SEL_WRITE:
5978 SET_BOTH(CRX_WRITE);
5979 uEventArg = uExitCode - SVM_EXIT_WRITE_CR0;
5980 break;
5981 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5982 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
5983 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
5984 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5985 SET_BOTH(DRX_READ);
5986 uEventArg = uExitCode - SVM_EXIT_READ_DR0;
5987 break;
5988 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5989 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
5990 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
5991 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5992 SET_BOTH(DRX_WRITE);
5993 uEventArg = uExitCode - SVM_EXIT_WRITE_DR0;
5994 break;
5995 case SVM_EXIT_MSR:
5996 if (pSvmTransient->pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
5997 SET_BOTH(WRMSR);
5998 else
5999 SET_BOTH(RDMSR);
6000 break;
6001 case SVM_EXIT_MWAIT_ARMED:
6002 case SVM_EXIT_MWAIT: SET_BOTH(MWAIT); break;
6003 case SVM_EXIT_MONITOR: SET_BOTH(MONITOR); break;
6004 case SVM_EXIT_PAUSE: SET_BOTH(PAUSE); break;
6005 case SVM_EXIT_IDTR_READ: SET_BOTH(SIDT); break;
6006 case SVM_EXIT_IDTR_WRITE: SET_BOTH(LIDT); break;
6007 case SVM_EXIT_GDTR_READ: SET_BOTH(SGDT); break;
6008 case SVM_EXIT_GDTR_WRITE: SET_BOTH(LGDT); break;
6009 case SVM_EXIT_LDTR_READ: SET_BOTH(SLDT); break;
6010 case SVM_EXIT_LDTR_WRITE: SET_BOTH(LLDT); break;
6011 case SVM_EXIT_TR_READ: SET_BOTH(STR); break;
6012 case SVM_EXIT_TR_WRITE: SET_BOTH(LTR); break;
6013 case SVM_EXIT_WBINVD: SET_BOTH(WBINVD); break;
6014 case SVM_EXIT_XSETBV: SET_BOTH(XSETBV); break;
6015
6016 case SVM_EXIT_SHUTDOWN:
6017 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
6018 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
6019 break;
6020
6021 default:
6022 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6023 break;
6024 }
6025#undef SET_BOTH
6026#undef SET_EXIT
6027
6028 /*
6029 * Dtrace tracepoints go first. We do them here at once so we don't
6030 * have to copy the guest state saving and stuff a few dozen times.
6031      * The downside is that we've got to repeat the switch, though this time
6032 * we use enmEvent since the probes are a subset of what DBGF does.
6033 */
6034 if (fDtrace1 || fDtrace2)
6035 {
6036 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6037 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
6038 switch (enmEvent1)
6039 {
6040 /** @todo consider which extra parameters would be helpful for each probe. */
6041 case DBGFEVENT_END: break;
6042 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6043 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
6044 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
6045 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
6046 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
6047 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
6048 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
6049 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
6050 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
6051 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6052 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6053 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, (uint32_t)uEventArg); break;
6054 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, (uint32_t)uEventArg); break;
6055 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, (uint32_t)uEventArg, pCtx->cr2); break;
6056 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
6057 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
6058 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
6059 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
6060 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, (uint32_t)uEventArg); break;
6061 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6062 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
6063 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
6064 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
6065 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
6066 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
6067 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
6068 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6069 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6070 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6071 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6072 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6073 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
6074 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6075 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
6076 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
6077 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
6078 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
6079 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
6080 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
6081 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
6082 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
6083 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
6084 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
6085 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
6086 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
6087 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
6088 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
6089 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
6090 case DBGFEVENT_INSTR_SVM_VMRUN: VBOXVMM_INSTR_SVM_VMRUN(pVCpu, pCtx); break;
6091 case DBGFEVENT_INSTR_SVM_VMLOAD: VBOXVMM_INSTR_SVM_VMLOAD(pVCpu, pCtx); break;
6092 case DBGFEVENT_INSTR_SVM_VMSAVE: VBOXVMM_INSTR_SVM_VMSAVE(pVCpu, pCtx); break;
6093 case DBGFEVENT_INSTR_SVM_STGI: VBOXVMM_INSTR_SVM_STGI(pVCpu, pCtx); break;
6094 case DBGFEVENT_INSTR_SVM_CLGI: VBOXVMM_INSTR_SVM_CLGI(pVCpu, pCtx); break;
6095 default: AssertMsgFailed(("enmEvent1=%d uExitCode=%d\n", enmEvent1, uExitCode)); break;
6096 }
6097 switch (enmEvent2)
6098 {
6099 /** @todo consider which extra parameters would be helpful for each probe. */
6100 case DBGFEVENT_END: break;
6101 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
6102 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
6103 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
6104 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
6105 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
6106 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
6107 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
6108 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
6109 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6110 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6111 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
6112 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
6113 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
6114 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
6115 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
6116 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
6117 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
6118 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
6119 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
6120 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
6121 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
6122 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
6123 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
6124 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
6125 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
6126 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
6127 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
6128 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
6129 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
6130 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
6131 case DBGFEVENT_EXIT_SVM_VMRUN: VBOXVMM_EXIT_SVM_VMRUN(pVCpu, pCtx); break;
6132 case DBGFEVENT_EXIT_SVM_VMLOAD: VBOXVMM_EXIT_SVM_VMLOAD(pVCpu, pCtx); break;
6133 case DBGFEVENT_EXIT_SVM_VMSAVE: VBOXVMM_EXIT_SVM_VMSAVE(pVCpu, pCtx); break;
6134 case DBGFEVENT_EXIT_SVM_STGI: VBOXVMM_EXIT_SVM_STGI(pVCpu, pCtx); break;
6135 case DBGFEVENT_EXIT_SVM_CLGI: VBOXVMM_EXIT_SVM_CLGI(pVCpu, pCtx); break;
6136 default: AssertMsgFailed(("enmEvent2=%d uExitCode=%d\n", enmEvent2, uExitCode)); break;
6137 }
6138 }
6139
6140 /*
6141     * Fire off the DBGF event, if enabled (our check here is just a quick one,
6142 * the DBGF call will do a full check).
6143 *
6144 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
6145     * Note! If we have two events, we prioritize the first, i.e. the instruction
6146 * one, in order to avoid event nesting.
6147 */
6148 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6149 VBOXSTRICTRC rcStrict;
6150 if ( enmEvent1 != DBGFEVENT_END
6151 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
6152 {
6153 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6154 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
6155 }
6156 else if ( enmEvent2 != DBGFEVENT_END
6157 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
6158 {
6159 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6160 rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
6161 }
6162 else
6163 rcStrict = VINF_SUCCESS;
6164 return rcStrict;
6165}
6166
6167
6168/**
6169 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID),
6170 * debug variant.
6171 *
6172 * @returns Strict VBox status code (informational status codes included).
6173 * @param pVCpu The cross context virtual CPU structure.
6174 * @param pSvmTransient Pointer to the SVM transient structure.
6175 * @param pDbgState The runtime debug state.
6176 */
6177static VBOXSTRICTRC hmR0SvmDebugHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
6178{
6179 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
6180 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
6181
6182 /*
6183 * Expensive (saves context) generic dtrace VM-exit probe.
6184 */
6185 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
6186 if (!VBOXVMM_R0_HMSVM_VMEXIT_ENABLED())
6187 { /* more likely */ }
6188 else
6189 {
6190 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6191 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, uExitCode, pSvmTransient->pVmcb);
6192 }
6193
6194 /*
6195 * Check for single stepping event if we're stepping.
6196 */
6197 if (pVCpu->hm.s.fSingleInstruction)
6198 {
6199 switch (uExitCode)
6200 {
6201 /* Various events: */
6202 case SVM_EXIT_XCPT_0: case SVM_EXIT_XCPT_1: case SVM_EXIT_XCPT_2: case SVM_EXIT_XCPT_3:
6203 case SVM_EXIT_XCPT_4: case SVM_EXIT_XCPT_5: case SVM_EXIT_XCPT_6: case SVM_EXIT_XCPT_7:
6204 case SVM_EXIT_XCPT_8: case SVM_EXIT_XCPT_9: case SVM_EXIT_XCPT_10: case SVM_EXIT_XCPT_11:
6205 case SVM_EXIT_XCPT_12: case SVM_EXIT_XCPT_13: case SVM_EXIT_XCPT_14: case SVM_EXIT_XCPT_15:
6206 case SVM_EXIT_XCPT_16: case SVM_EXIT_XCPT_17: case SVM_EXIT_XCPT_18: case SVM_EXIT_XCPT_19:
6207 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
6208 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
6209 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
6210 case SVM_EXIT_INTR:
6211 case SVM_EXIT_NMI:
6212 case SVM_EXIT_VINTR:
6213 case SVM_EXIT_NPF:
6214 case SVM_EXIT_AVIC_NOACCEL:
6215
6216 /* Instruction specific VM-exits: */
6217 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
6218 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
6219 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
6220 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
6221 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
6222 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
6223 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
6224 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
6225 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
6226 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
6227 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
6228 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
6229 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
6230 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
6231 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
6232 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
6233 case SVM_EXIT_CR0_SEL_WRITE:
6234 case SVM_EXIT_IDTR_READ:
6235 case SVM_EXIT_GDTR_READ:
6236 case SVM_EXIT_LDTR_READ:
6237 case SVM_EXIT_TR_READ:
6238 case SVM_EXIT_IDTR_WRITE:
6239 case SVM_EXIT_GDTR_WRITE:
6240 case SVM_EXIT_LDTR_WRITE:
6241 case SVM_EXIT_TR_WRITE:
6242 case SVM_EXIT_RDTSC:
6243 case SVM_EXIT_RDPMC:
6244 case SVM_EXIT_PUSHF:
6245 case SVM_EXIT_POPF:
6246 case SVM_EXIT_CPUID:
6247 case SVM_EXIT_RSM:
6248 case SVM_EXIT_IRET:
6249 case SVM_EXIT_SWINT:
6250 case SVM_EXIT_INVD:
6251 case SVM_EXIT_PAUSE:
6252 case SVM_EXIT_HLT:
6253 case SVM_EXIT_INVLPG:
6254 case SVM_EXIT_INVLPGA:
6255 case SVM_EXIT_IOIO:
6256 case SVM_EXIT_MSR:
6257 case SVM_EXIT_TASK_SWITCH:
6258 case SVM_EXIT_VMRUN:
6259 case SVM_EXIT_VMMCALL:
6260 case SVM_EXIT_VMLOAD:
6261 case SVM_EXIT_VMSAVE:
6262 case SVM_EXIT_STGI:
6263 case SVM_EXIT_CLGI:
6264 case SVM_EXIT_SKINIT:
6265 case SVM_EXIT_RDTSCP:
6266 case SVM_EXIT_ICEBP:
6267 case SVM_EXIT_WBINVD:
6268 case SVM_EXIT_MONITOR:
6269 case SVM_EXIT_MWAIT:
6270 case SVM_EXIT_MWAIT_ARMED:
6271 case SVM_EXIT_XSETBV:
6272 case SVM_EXIT_RDPRU:
6273 case SVM_EXIT_WRITE_EFER_TRAP:
6274 case SVM_EXIT_WRITE_CR0_TRAP: case SVM_EXIT_WRITE_CR1_TRAP: case SVM_EXIT_WRITE_CR2_TRAP: case SVM_EXIT_WRITE_CR3_TRAP:
6275 case SVM_EXIT_WRITE_CR4_TRAP: case SVM_EXIT_WRITE_CR5_TRAP: case SVM_EXIT_WRITE_CR6_TRAP: case SVM_EXIT_WRITE_CR7_TRAP:
6276 case SVM_EXIT_WRITE_CR8_TRAP: case SVM_EXIT_WRITE_CR9_TRAP: case SVM_EXIT_WRITE_CR10_TRAP: case SVM_EXIT_WRITE_CR11_TRAP:
6277 case SVM_EXIT_WRITE_CR12_TRAP: case SVM_EXIT_WRITE_CR13_TRAP: case SVM_EXIT_WRITE_CR14_TRAP: case SVM_EXIT_WRITE_CR15_TRAP:
6278 case SVM_EXIT_MCOMMIT:
6279 {
6280 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6281 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
6282 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
6283 {
6284 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6285 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode));
6286 return VINF_EM_DBG_STEPPED;
6287 }
6288 break;
6289 }
6290
6291 /* Errors and unexpected events: */
6292 case SVM_EXIT_FERR_FREEZE:
6293 case SVM_EXIT_SHUTDOWN:
6294 case SVM_EXIT_AVIC_INCOMPLETE_IPI:
6295 break;
6296
6297 case SVM_EXIT_SMI:
6298 case SVM_EXIT_INIT:
6299 default:
6300 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
6301 break;
6302 }
6303 }
6304
6305 /*
6306 * Check for debugger event breakpoints and dtrace probes.
6307 */
6308 if ( uExitCode < sizeof(pDbgState->bmExitsToCheck) * 8U
6309 && ASMBitTest(pDbgState->bmExitsToCheck, uExitCode) )
6310 {
6311 VBOXSTRICTRC rcStrict = hmR0SvmHandleExitDtraceEvents(pVCpu, pSvmTransient, uExitCode);
6312 if (rcStrict != VINF_SUCCESS)
6313 {
6314 Log6Func(("%04x:%08RX64 (exit %u) -> %Rrc\n",
6315 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, VBOXSTRICTRC_VAL(rcStrict) ));
6316 return rcStrict;
6317 }
6318 }
6319
6320 /*
6321 * Normal processing.
6322 */
6323 return hmR0SvmHandleExit(pVCpu, pSvmTransient);
6324}
6325
6326
6327/**
6328 * Runs the guest code using AMD-V in single step mode.
6329 *
6330 * @returns Strict VBox status code.
6331 * @param pVCpu The cross context virtual CPU structure.
6332 * @param pcLoops Pointer to the number of executed loops.
6333 */
6334static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
6335{
6336 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
6337 Assert(pcLoops);
6338 Assert(*pcLoops <= cMaxResumeLoops);
6339
6340 SVMTRANSIENT SvmTransient;
6341 RT_ZERO(SvmTransient);
6342 SvmTransient.fUpdateTscOffsetting = true;
6343 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
6344
6345 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6346
6347 /* Set HMCPU indicators. */
6348 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
6349 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
6350 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6351 pVCpu->hmr0.s.fUsingDebugLoop = true;
6352
6353 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
6354 SVMRUNDBGSTATE DbgState;
6355 hmR0SvmRunDebugStateInit(pVCpu, &SvmTransient, &DbgState);
6356 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6357
6358 /*
6359 * The loop.
6360 */
6361 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
6362 for (;;)
6363 {
6364 Assert(!HMR0SuspendPending());
6365 AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
6366 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
6367 (unsigned)RTMpCpuId(), *pcLoops));
6368 bool fStepping = pVCpu->hm.s.fSingleInstruction;
6369
6370 /* Set up VM-execution controls the next two can respond to. */
6371 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6372
6373 /* Preparatory work for running nested-guest code, this may force us to return to
6374 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
6375 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6376 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
6377 if (rc != VINF_SUCCESS)
6378 break;
6379
6380 /*
6381 * No longjmps to ring-3 from this point on!!!
6382 *
6383 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
6384 * better than a kernel panic. This also disables flushing of the R0-logger instance.
6385 */
6386 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
6387
6388 /* Override any obnoxious code in the above two calls. */
6389 hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
6390#if 0
6391 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64\n",
6392 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6393 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u));
6394#endif
6395
6396 /*
6397 * Finally execute guest code.
6398 */
6399 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
6400
6401 /* Restore any residual host-state and save any bits shared between host and guest
6402 into the guest-CPU state. Re-enables interrupts! */
6403 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
6404#if 0
6405 Log(("%04x:%08RX64 ds=%04x %04x:%08RX64 i=%#RX64 exit=%d\n",
6406 SvmTransient.pVmcb->guest.CS.u16Sel, SvmTransient.pVmcb->guest.u64RIP, SvmTransient.pVmcb->guest.DS.u16Sel,
6407 SvmTransient.pVmcb->guest.SS.u16Sel, SvmTransient.pVmcb->guest.u64RSP, SvmTransient.pVmcb->ctrl.EventInject.u, SvmTransient.u64ExitCode));
6408#endif
6409
6410 if (RT_LIKELY( rc == VINF_SUCCESS /* Check for VMRUN errors. */
6411 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
6412        { /* very likely */ }
6413 else
6414 {
6415 if (rc == VINF_SUCCESS)
6416 rc = VERR_SVM_INVALID_GUEST_STATE;
6417 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
6418 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
6419 return rc;
6420 }
6421
6422 /* Handle the #VMEXIT. */
6423 HMSVM_DEBUG_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
6424 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
6425 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
6426 rc = hmR0SvmDebugHandleExit(pVCpu, &SvmTransient, &DbgState);
6427 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
6428 if (rc != VINF_SUCCESS)
6429 break;
6430 if (++(*pcLoops) >= cMaxResumeLoops)
6431 {
6432 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
6433 rc = VINF_EM_RAW_INTERRUPT;
6434 break;
6435 }
6436
6437 /*
6438 * Stepping: Did the RIP change, if so, consider it a single step.
6439 * Otherwise, make sure one of the TFs gets set.
6440 */
6441 if (fStepping)
6442 {
6443 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6444 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
6445 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
6446 {
6447 Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
6448 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, SvmTransient.u64ExitCode));
6449 rc = VINF_EM_DBG_STEPPED;
6450 break;
6451 }
6452 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
6453 }
6454
6455 /*
6456         * Update when the dtrace settings change (DBGF kicks us, so no need to check).
6457         * Revert the state changes afterwards so we can drop intercepts no longer needed.
6458 */
6459 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
6460 {
6461 hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
6462 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6463 }
6464 }
6465
6466 /*
6467 * Clear the X86_EFL_TF if necessary.
6468 */
6469 if (pVCpu->hmr0.s.fClearTrapFlag)
6470 {
6471 pVCpu->hmr0.s.fClearTrapFlag = false;
6472 pCtx->eflags.Bits.u1TF = 0;
6473 }
6474
6475 /* Restore HMCPU indicators. */
6476 pVCpu->hmr0.s.fUsingDebugLoop = false;
6477 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
6478 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
6479
6480 /* Restore all controls applied by hmR0SvmPreRunGuestDebugStateApply above. */
6481 hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
6482
6483 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6484 return rc;
6485}
6486
6487/** @} */
6488
6489#undef VMEXIT_CALL_RET
6490
6491
6492#ifdef VBOX_STRICT
6493/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6494# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
6495 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6496
6497# define HMSVM_ASSERT_PREEMPT_CPUID() \
6498 do \
6499 { \
6500 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6501 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6502 } while (0)
6503
6504# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6505 do { \
6506 AssertPtr((a_pVCpu)); \
6507 AssertPtr((a_pSvmTransient)); \
6508 Assert(ASMIntAreEnabled()); \
6509 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6510 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
6511 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
6512 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
6513 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6514 HMSVM_ASSERT_PREEMPT_CPUID(); \
6515 } while (0)
6516#else
6517# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
6518 do { \
6519 RT_NOREF2(a_pVCpu, a_pSvmTransient); \
6520 } while (0)
6521#endif
6522
6523
6524/**
6525 * Gets the IEM exception flags for the specified SVM event.
6526 *
6527 * @returns The IEM exception flags.
6528 * @param pEvent Pointer to the SVM event.
6529 *
6530 * @remarks This function currently only constructs flags required for
6531 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
6532 * and CR2 aspects of an exception are not included).
6533 */
6534static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
6535{
6536 uint8_t const uEventType = pEvent->n.u3Type;
6537 uint32_t fIemXcptFlags;
6538 switch (uEventType)
6539 {
6540 case SVM_EVENT_EXCEPTION:
6541 /*
6542 * Only INT3 and INTO instructions can raise #BP and #OF exceptions.
6543 * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
6544 */
6545 if (pEvent->n.u8Vector == X86_XCPT_BP)
6546 {
6547 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
6548 break;
6549 }
6550 if (pEvent->n.u8Vector == X86_XCPT_OF)
6551 {
6552 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
6553 break;
6554 }
6555 /** @todo How do we distinguish ICEBP \#DB from the regular one? */
6556 RT_FALL_THRU();
6557 case SVM_EVENT_NMI:
6558 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6559 break;
6560
6561 case SVM_EVENT_EXTERNAL_IRQ:
6562 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6563 break;
6564
6565 case SVM_EVENT_SOFTWARE_INT:
6566 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6567 break;
6568
6569 default:
6570 fIemXcptFlags = 0;
6571 AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
6572 break;
6573 }
6574 return fIemXcptFlags;
6575}
6576
6577
6578/**
6579 * Handle a condition that occurred while delivering an event through the guest
6580 * IDT.
6581 *
6582 * @returns VBox status code (informational error codes included).
6583 * @retval VINF_SUCCESS if we should continue handling the \#VMEXIT.
6584 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
6585 *          continue execution of the guest which will deliver the \#DF.
6586 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6587 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6588 *
6589 * @param pVCpu The cross context virtual CPU structure.
6590 * @param pSvmTransient Pointer to the SVM transient structure.
6591 *
6592 * @remarks No-long-jump zone!!!
6593 */
6594static int hmR0SvmCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6595{
6596 /** @todo r=bird: Looks like this is called on many exits and we start by
6597 * loading CR2 on the offchance that we actually have work to do here.
6598 *
6599 * HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY can surely check
6600 * pVmcb->ctrl.ExitIntInfo.n.u1Valid, can't it?
6601 *
6602 * Also, what's the deal with hmR0SvmGetCurrentVmcb() vs pSvmTransient->pVmcb?
6603 */
6604 int rc = VINF_SUCCESS;
6605 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6606 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
6607
6608 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
6609 pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
6610 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));
6611
6612 /*
6613 * The EXITINTINFO (if valid) contains the prior exception (IDT vector) that was trying to
6614 * be delivered to the guest which caused a #VMEXIT which was intercepted (Exit vector).
6615 *
6616 * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code".
6617 */
6618 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
6619 {
6620 IEMXCPTRAISE enmRaise;
6621 IEMXCPTRAISEINFO fRaiseInfo;
6622        bool const fExitIsHwXcpt  = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31 - SVM_EXIT_XCPT_0;
6623 uint8_t const uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
6624 if (fExitIsHwXcpt)
6625 {
6626 uint8_t const uExitVector = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0;
6627 uint32_t const fIdtVectorFlags = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
6628 uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6629 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6630 }
6631 else
6632 {
6633 /*
6634 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF)
6635 * then we end up here.
6636 *
6637 * If the event was:
6638 * - a software interrupt, we can re-execute the instruction which will
6639 * regenerate the event.
6640 * - an NMI, we need to clear NMI blocking and re-inject the NMI.
6641 * - a hardware exception or external interrupt, we re-inject it.
6642 */
6643 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6644 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
6645 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6646 else
6647 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6648 }
6649
6650 switch (enmRaise)
6651 {
6652 case IEMXCPTRAISE_CURRENT_XCPT:
6653 case IEMXCPTRAISE_PREV_EVENT:
6654 {
6655 /* For software interrupts, we shall re-execute the instruction. */
6656 if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
6657 {
6658 RTGCUINTPTR GCPtrFaultAddress = 0;
6659
6660 /* If we are re-injecting an NMI, clear NMI blocking. */
6661 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
6662 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
6663
6664 /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
6665 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6666 {
6667 pSvmTransient->fVectoringPF = true;
6668 Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
6669 pVCpu->cpum.GstCtx.cr2));
6670 }
6671 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
6672 && uIdtVector == X86_XCPT_PF)
6673 {
6674 /*
6675 * If the previous exception was a #PF, we need to recover the CR2 value.
6676 * This can't happen with shadow paging.
6677 */
6678 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
6679 }
6680
6681 /*
6682 * Without nested paging, when uExitVector is #PF, CR2 value will be updated from the VMCB's
6683 * exit info. fields, if it's a guest #PF, see hmR0SvmExitXcptPF().
6684 */
6685 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
6686 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
6687 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);
6688
6689 Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
6690 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
6691 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
6692 }
6693 break;
6694 }
6695
6696 case IEMXCPTRAISE_REEXEC_INSTR:
6697 {
6698 Assert(rc == VINF_SUCCESS);
6699 break;
6700 }
6701
6702 case IEMXCPTRAISE_DOUBLE_FAULT:
6703 {
6704 /*
6705                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates
6706 * the second #PF as a guest #PF (and not a shadow #PF) and needs to be
6707 * converted into a #DF.
6708 */
6709 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6710 {
6711 Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
6712 pSvmTransient->fVectoringDoublePF = true;
6713 Assert(rc == VINF_SUCCESS);
6714 }
6715 else
6716 {
6717 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
6718 hmR0SvmSetPendingXcptDF(pVCpu);
6719 rc = VINF_HM_DOUBLE_FAULT;
6720 }
6721 break;
6722 }
6723
6724 case IEMXCPTRAISE_TRIPLE_FAULT:
6725 {
6726 rc = VINF_EM_RESET;
6727 break;
6728 }
6729
6730 case IEMXCPTRAISE_CPU_HANG:
6731 {
6732 rc = VERR_EM_GUEST_CPU_HANG;
6733 break;
6734 }
6735
6736 default:
6737 AssertMsgFailedBreakStmt(("Bogus enmRaise value: %d (%#x)\n", enmRaise, enmRaise), rc = VERR_SVM_IPE_2);
6738 }
6739 }
6740 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
6741 return rc;
6742}
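
/*
 * Illustrative sketch, not part of the original file: how a hardware exception
 * intercept might typically consume the helper above before doing its own work.
 * The handler name and the surrounding code are hypothetical; only the helper
 * and its documented return values are taken from this file.
 */
#if 0 /* example only */
HMSVM_EXIT_DECL hmR0SvmExitXcptExample(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    /* Reflect, convert or drop any event that was being delivered when this intercept hit. */
    int const rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pSvmTransient);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
        return rc;  /* VINF_HM_DOUBLE_FAULT, VINF_EM_RESET or VERR_EM_GUEST_CPU_HANG. */

    /* ... handle the exception itself here ... */
    return VINF_SUCCESS;
}
#endif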
6743
6744
6745/**
6746 * Advances the guest RIP by the number of bytes specified in @a cb.
6747 *
6748 * @param pVCpu The cross context virtual CPU structure.
6749 * @param cb RIP increment value in bytes.
6750 */
6751DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
6752{
6753 pVCpu->cpum.GstCtx.rip += cb;
6754 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6755 /** @todo clear RF. */
6756}
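
/*
 * Illustrative sketch, not part of the original file: deriving the instruction
 * length from the VMCB NextRIP field when NRIP-save is available and feeding it
 * to the helper above, as the #VMEXIT handlers below do before falling back to
 * IEMExecOne() when it is not.  The wrapper name is hypothetical.
 */
#if 0 /* example only */
DECLINLINE(bool) hmR0SvmTryAdvanceRipExample(PVMCPUCC pVCpu)
{
    if (!hmR0SvmSupportsNextRipSave(pVCpu))
        return false;   /* Caller must let IEM decode and advance RIP instead. */
    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    hmR0SvmAdvanceRip(pVCpu, (uint32_t)(pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip));
    return true;
}
#endif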
6757
6758
6759/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6760/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6761/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6762
6763/** @name \#VMEXIT handlers.
6764 * @{
6765 */
6766
6767/**
6768 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
6769 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
6770 */
6771HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6772{
6773 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6774
6775 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
6776 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
6777 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
6778 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6779
6780 /*
6781 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to
6782     * signal -before- the timer fires if the current interrupt is our own timer or some
6783     * other host interrupt. We also cannot examine what interrupt it is until the host
6784     * actually takes the interrupt.
6785 *
6786 * Going back to executing guest code here unconditionally causes random scheduling
6787 * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host).
6788 */
6789 return VINF_EM_RAW_INTERRUPT;
6790}
6791
6792
6793/**
6794 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
6795 */
6796HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6797{
6798 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6799
6800 VBOXSTRICTRC rcStrict;
6801 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6802 if (fSupportsNextRipSave)
6803 {
6804 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6805 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6806 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6807 rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr);
6808 }
6809 else
6810 {
6811 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6812 rcStrict = IEMExecOne(pVCpu);
6813 }
6814
6815 if (rcStrict == VINF_IEM_RAISED_XCPT)
6816 {
6817 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6818 rcStrict = VINF_SUCCESS;
6819 }
6820 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6821 return rcStrict;
6822}
6823
6824
6825/**
6826 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
6827 */
6828HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6829{
6830 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6831
6832 VBOXSTRICTRC rcStrict;
6833 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6834 if (fSupportsNextRipSave)
6835 {
6836 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6837 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6838 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6839 rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr);
6840 }
6841 else
6842 {
6843 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6844 rcStrict = IEMExecOne(pVCpu);
6845 }
6846
6847 if (rcStrict == VINF_IEM_RAISED_XCPT)
6848 {
6849 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6850 rcStrict = VINF_SUCCESS;
6851 }
6852 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6853 return rcStrict;
6854}
6855
6856
6857/**
6858 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
6859 */
6860HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6861{
6862 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6863
6864 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
6865 VBOXSTRICTRC rcStrict;
6866 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
6867 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
6868 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
6869 if (!pExitRec)
6870 {
6871 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6872 if (fSupportsNextRipSave)
6873 {
6874 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6875 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6876 rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
6877 }
6878 else
6879 {
6880 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6881 rcStrict = IEMExecOne(pVCpu);
6882 }
6883
6884 if (rcStrict == VINF_IEM_RAISED_XCPT)
6885 {
6886 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
6887 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6888 rcStrict = VINF_SUCCESS;
6889 }
6890 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6891 }
6892 else
6893 {
6894 /*
6895 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
6896 */
6897 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6898
6899 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
6900 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
6901
6902 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
6903
6904 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
6905 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6906 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6907 }
6908 return rcStrict;
6909}
6910
6911
6912/**
6913 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
6914 */
6915HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6916{
6917 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6918
6919 VBOXSTRICTRC rcStrict;
6920 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6921 if (fSupportsNextRipSave)
6922 {
6923 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
6924 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6925 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6926 rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
6927 }
6928 else
6929 {
6930 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6931 rcStrict = IEMExecOne(pVCpu);
6932 }
6933
6934 if (rcStrict == VINF_SUCCESS)
6935 pSvmTransient->fUpdateTscOffsetting = true;
6936 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6937 {
6938 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6939 rcStrict = VINF_SUCCESS;
6940 }
6941 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6942 return rcStrict;
6943}
6944
6945
6946/**
6947 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
6948 */
6949HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6950{
6951 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6952
6953 VBOXSTRICTRC rcStrict;
6954 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6955 if (fSupportsNextRipSave)
6956 {
6957 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
6958 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6959 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6960 rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr);
6961 }
6962 else
6963 {
6964 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6965 rcStrict = IEMExecOne(pVCpu);
6966 }
6967
6968 if (rcStrict == VINF_SUCCESS)
6969 pSvmTransient->fUpdateTscOffsetting = true;
6970 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6971 {
6972 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6973 rcStrict = VINF_SUCCESS;
6974 }
6975 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6976 return rcStrict;
6977}
6978
6979
6980/**
6981 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
6982 */
6983HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6984{
6985 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6986
6987 VBOXSTRICTRC rcStrict;
6988 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6989 if (fSupportsNextRipSave)
6990 {
6991 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
6992 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6993 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6994 rcStrict = IEMExecDecodedRdpmc(pVCpu, cbInstr);
6995 }
6996 else
6997 {
6998 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6999 rcStrict = IEMExecOne(pVCpu);
7000 }
7001
7002 if (rcStrict == VINF_IEM_RAISED_XCPT)
7003 {
7004 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7005 rcStrict = VINF_SUCCESS;
7006 }
7007 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7008 return rcStrict;
7009}
7010
7011
7012/**
7013 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
7014 */
7015HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7016{
7017 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7018 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
7019
7020 VBOXSTRICTRC rcStrict;
7021 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7022 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7023 if ( fSupportsDecodeAssists
7024 && fSupportsNextRipSave)
7025 {
7026 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7027 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7028 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7029 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
7030 rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
7031 }
7032 else
7033 {
7034 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7035 rcStrict = IEMExecOne(pVCpu);
7036 }
7037
7038 if (rcStrict == VINF_IEM_RAISED_XCPT)
7039 {
7040 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7041 rcStrict = VINF_SUCCESS;
7042 }
7043 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7044 return VBOXSTRICTRC_VAL(rcStrict);
7045}
7046
7047
7048/**
7049 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
7050 */
7051HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7052{
7053 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7054
7055 VBOXSTRICTRC rcStrict;
7056 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7057 if (fSupportsNextRipSave)
7058 {
7059 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7060 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7061 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7062 rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
7063 }
7064 else
7065 {
7066 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7067 rcStrict = IEMExecOne(pVCpu);
7068 }
7069
7070 if ( rcStrict == VINF_EM_HALT
7071 || rcStrict == VINF_SUCCESS)
7072 rcStrict = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT;
7073 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7074 {
7075 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7076 rcStrict = VINF_SUCCESS;
7077 }
7078 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7079 if (rcStrict != VINF_SUCCESS)
7080 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
7081    return VBOXSTRICTRC_VAL(rcStrict);
7082}
7083
7084
7085/**
7086 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
7087 */
7088HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7089{
7090 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7091
7092 /*
7093     * If the instruction length supplied by the CPU is 3 bytes, we can be certain that no
7094     * segment override prefix is present (and thus use the default segment DS). Otherwise, a
7095     * segment override prefix or other prefixes might be used, in which case we fall back to
7096     * IEMExecOne() to figure it out.
7097 */
7098 VBOXSTRICTRC rcStrict;
7099 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7100 uint8_t const cbInstr = hmR0SvmSupportsNextRipSave(pVCpu) ? pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip : 0;
7101 if (cbInstr)
7102 {
7103 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7104 rcStrict = IEMExecDecodedMonitor(pVCpu, cbInstr);
7105 }
7106 else
7107 {
7108 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7109 rcStrict = IEMExecOne(pVCpu);
7110 }
7111
7112 if (rcStrict == VINF_IEM_RAISED_XCPT)
7113 {
7114 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7115 rcStrict = VINF_SUCCESS;
7116 }
7117 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7118 return rcStrict;
7119}
7120
7121
7122/**
7123 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
7124 */
7125HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7126{
7127 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7128
7129 VBOXSTRICTRC rcStrict;
7130 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7131 if (fSupportsNextRipSave)
7132 {
7133 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7134 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7135 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7136 rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr);
7137 }
7138 else
7139 {
7140 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7141 rcStrict = IEMExecOne(pVCpu);
7142 }
7143
7144 if ( rcStrict == VINF_EM_HALT
7145 && EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7146 rcStrict = VINF_SUCCESS;
7147 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7148 {
7149 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7150 rcStrict = VINF_SUCCESS;
7151 }
7152 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7153 return rcStrict;
7154}
7155
7156
7157/**
7158 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
7159 * \#VMEXIT.
7160 */
7161HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7162{
7163 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7164 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7165 return VINF_EM_RESET;
7166}
7167
7168
7169/**
7170 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
7171 */
7172HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7173{
7174 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7175 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7176 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
7177 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
7178 RT_NOREF(pVmcb);
7179 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7180 return VERR_SVM_UNEXPECTED_EXIT;
7181}
7182
7183
7184/**
7185 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
7186 */
7187HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7188{
7189 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7190
7191 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7192 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7193#ifdef VBOX_WITH_STATISTICS
7194 switch (pSvmTransient->u64ExitCode)
7195 {
7196 case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
7197 case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
7198 case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
7199 case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
7200 case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
7201 }
7202#endif
7203
7204 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7205 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7206 if ( fSupportsDecodeAssists
7207 && fSupportsNextRipSave)
7208 {
7209 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7210 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7211 if (fMovCRx)
7212 {
7213 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR_MASK
7214 | CPUMCTX_EXTRN_APIC_TPR);
7215 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
7216 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
7217 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7218 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
7219 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7220 return VBOXSTRICTRC_VAL(rcStrict);
7221 }
7222 /* else: SMSW instruction, fall back below to IEM for this. */
7223 }
7224
7225 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7226 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7227 AssertMsg( rcStrict == VINF_SUCCESS
7228 || rcStrict == VINF_PGM_SYNC_CR3
7229 || rcStrict == VINF_IEM_RAISED_XCPT,
7230 ("hmR0SvmExitReadCRx: IEMExecOne failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7231 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
7232 if (rcStrict == VINF_IEM_RAISED_XCPT)
7233 {
7234 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7235 rcStrict = VINF_SUCCESS;
7236 }
7237 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7238 return rcStrict;
7239}
7240
7241
7242/**
7243 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
7244 */
7245HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7246{
7247 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7248
7249 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
7250 uint8_t const iCrReg = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
7251 Assert(iCrReg <= 15);
7252
7253 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
7254 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7255 bool fDecodedInstr = false;
7256 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
7257 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7258 if ( fSupportsDecodeAssists
7259 && fSupportsNextRipSave)
7260 {
7261 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7262 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
7263 if (fMovCRx)
7264 {
7265 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
7266 | CPUMCTX_EXTRN_APIC_TPR);
7267 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
7268 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
7269 Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
7270 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
7271 fDecodedInstr = true;
7272 }
7273 /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
7274 }
7275
7276 if (!fDecodedInstr)
7277 {
7278 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7279 Log4Func(("iCrReg=%#x\n", iCrReg));
7280 rcStrict = IEMExecOne(pVCpu);
7281 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
7282 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
7283 rcStrict = VERR_EM_INTERPRETER;
7284 }
7285
7286 if (rcStrict == VINF_SUCCESS)
7287 {
7288 switch (iCrReg)
7289 {
7290 case 0:
7291 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
7292 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
7293 break;
7294
7295 case 2:
7296 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
7297 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
7298 break;
7299
7300 case 3:
7301 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
7302 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
7303 break;
7304
7305 case 4:
7306 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
7307 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
7308 break;
7309
7310 case 8:
7311 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7312 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
7313 break;
7314
7315 default:
7316 {
7317 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
7318 pSvmTransient->u64ExitCode, iCrReg));
7319 break;
7320 }
7321 }
7322 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7323 }
7324 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7325 {
7326 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7327 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7328 rcStrict = VINF_SUCCESS;
7329 }
7330 else
7331 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_SYNC_CR3);
7332 return rcStrict;
7333}
7334
7335
7336/**
7337 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
7338 *
7339 * @returns Strict VBox status code.
7340 * @param pVCpu The cross context virtual CPU structure.
7341 * @param pVmcb Pointer to the VM control block.
7342 */
7343static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
7344{
7345 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7346 Log4Func(("idMsr=%#RX32\n", pVCpu->cpum.GstCtx.ecx));
7347
7348 VBOXSTRICTRC rcStrict;
7349 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7350 if (fSupportsNextRipSave)
7351 {
7352 /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
7353 * can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
7354 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7355 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7356 rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
7357 }
7358 else
7359 {
7360 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7361 rcStrict = IEMExecOne(pVCpu);
7362 }
7363
7364 AssertMsg( rcStrict == VINF_SUCCESS
7365 || rcStrict == VINF_IEM_RAISED_XCPT
7366 || rcStrict == VINF_CPUM_R3_MSR_READ,
7367 ("hmR0SvmExitReadMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7368
7369 if (rcStrict == VINF_IEM_RAISED_XCPT)
7370 {
7371 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7372 rcStrict = VINF_SUCCESS;
7373 }
7374 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7375 return rcStrict;
7376}
7377
7378
7379/**
7380 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
7381 *
7382 * @returns Strict VBox status code.
7383 * @param pVCpu The cross context virtual CPU structure.
7384 * @param pVmcb Pointer to the VM control block.
7385 * @param pSvmTransient Pointer to the SVM-transient structure.
7386 */
7387static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
7388{
7389 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7390 uint32_t const idMsr = pCtx->ecx;
7391 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7392 Log4Func(("idMsr=%#RX32\n", idMsr));
7393
7394 /*
7395 * Handle TPR patching MSR writes.
7396     * We utilize the LSTAR MSR for patching.
7397 */
7398 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7399 if ( idMsr == MSR_K8_LSTAR
7400 && pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingActive)
7401 {
7402 unsigned cbInstr;
7403 if (fSupportsNextRipSave)
7404 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7405 else
7406 {
7407 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
7408 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
7409 if ( rc == VINF_SUCCESS
7410 && pDis->pCurInstr->uOpcode == OP_WRMSR)
7411 Assert(cbInstr > 0);
7412 else
7413 cbInstr = 0;
7414 }
7415
7416 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
7417 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
7418 {
7419 int rc = APICSetTpr(pVCpu, pCtx->eax & 0xff);
7420 AssertRCReturn(rc, rc);
7421 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7422 }
7423
7424 int rc = VINF_SUCCESS;
7425 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7426 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7427 return rc;
7428 }
7429
7430 /*
7431 * Handle regular MSR writes.
7432 */
7433 VBOXSTRICTRC rcStrict;
7434 if (fSupportsNextRipSave)
7435 {
7436 /** @todo Optimize this: We don't need to get much of the MSR state here
7437 * since we're only updating. CPUMAllMsrs.cpp can ask for what it needs and
7438 * clear the applicable extern flags. */
7439 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7440 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7441 rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
7442 }
7443 else
7444 {
7445 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
7446 rcStrict = IEMExecOne(pVCpu);
7447 }
7448
7449 AssertMsg( rcStrict == VINF_SUCCESS
7450 || rcStrict == VINF_IEM_RAISED_XCPT
7451 || rcStrict == VINF_CPUM_R3_MSR_WRITE,
7452 ("hmR0SvmExitWriteMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7453
7454 if (rcStrict == VINF_SUCCESS)
7455 {
7456 /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
7457 if ( idMsr >= MSR_IA32_X2APIC_START
7458 && idMsr <= MSR_IA32_X2APIC_END)
7459 {
7460 /*
7461 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest().
7462 * When full APIC register virtualization is implemented we'll have to make sure
7463 * APIC state is saved from the VMCB before IEM changes it.
7464 */
7465 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7466 }
7467 else
7468 {
7469 switch (idMsr)
7470 {
7471 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
7472 case MSR_K6_EFER: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR); break;
7473 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
7474 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
7475 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7476 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7477 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7478 }
7479 }
7480 }
7481 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7482 {
7483 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7484 rcStrict = VINF_SUCCESS;
7485 }
7486 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7487 return rcStrict;
7488}
7489
7490
7491/**
7492 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
7493 * \#VMEXIT.
7494 */
7495HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7496{
7497 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7498
7499 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7500 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
7501 return hmR0SvmExitReadMsr(pVCpu, pVmcb);
7502
7503 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
7504 return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
7505}
7506
7507
7508/**
7509 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
7510 */
7511HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7512{
7513 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7514 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7515
7516 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
7517
7518 /** @todo Stepping with nested-guest. */
7519 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7520 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
7521 {
7522 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
7523 if (pSvmTransient->fWasGuestDebugStateActive)
7524 {
7525 AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
7526 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
7527 return VERR_SVM_UNEXPECTED_EXIT;
7528 }
7529
7530 /*
7531 * Lazy DR0-3 loading.
7532 */
7533 if (!pSvmTransient->fWasHyperDebugStateActive)
7534 {
7535 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
7536 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
7537
7538 /* Don't intercept DRx read and writes. */
7539 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
7540 pVmcb->ctrl.u16InterceptRdDRx = 0;
7541 pVmcb->ctrl.u16InterceptWrDRx = 0;
7542 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
7543
7544 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7545 VMMRZCallRing3Disable(pVCpu);
7546 HM_DISABLE_PREEMPT(pVCpu);
7547
7548 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
7549 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
7550 Assert(CPUMIsGuestDebugStateActive(pVCpu));
7551
7552 HM_RESTORE_PREEMPT();
7553 VMMRZCallRing3Enable(pVCpu);
7554
7555 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
7556 return VINF_SUCCESS;
7557 }
7558 }
7559
7560 /*
7561 * Interpret the read/writing of DRx.
7562 */
7563 /** @todo Decode assist. */
7564 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu);
7565 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
7566 if (RT_LIKELY(rc == VINF_SUCCESS))
7567 {
7568 /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
7569 /** @todo CPUM should set this flag! */
7570 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
7571 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
7572 }
7573 else
7574 Assert(rc == VERR_EM_INTERPRETER);
7575 return rc;
7576}
7577
7578
7579/**
7580 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
7581 */
7582HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7583{
7584 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7585    /* For now it's the same since we interpret the instruction anyway. Will change when use of decode assists is implemented. */
7586 VBOXSTRICTRC rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
7587 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
7588 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
7589 return rc;
7590}
7591
7592
7593/**
7594 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
7595 */
7596HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7597{
7598 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7599 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
7600
7601 /** @todo decode assists... */
7602 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7603 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7604 {
7605 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7606 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7607 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
7608 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7609 {
7610 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7611 hmR0SvmUpdateVmRunFunction(pVCpu);
7612 }
7613 }
7614 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7615 {
7616 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7617 rcStrict = VINF_SUCCESS;
7618 }
7619 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7620 return rcStrict;
7621}
7622
7623
7624/**
7625 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
7626 */
7627HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7628{
7629 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7630 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);
7631
7632 /* I/O operation lookup arrays. */
7633 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
7634 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
7635 the result (in AL/AX/EAX). */
7636 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7637 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7638 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7639
7640 Log4Func(("CS:RIP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
7641
7642    /* See AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
7643 SVMIOIOEXITINFO IoExitInfo;
7644 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
7645 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
7646 uint32_t cbValue = s_aIOSize[uIOWidth];
7647 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
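
    /*
     * Decode sketch for the raw shifts used here, assuming the EXITINFO1 layout from the AMD
     * manual (Fig. 15-2): bit 0 = direction (1 = IN), bit 2 = string, bit 3 = REP, bits 4..6 =
     * operand-size flags, bits 7..9 = address-size flags, bits 10..12 = effective segment and
     * bits 16..31 = port number. Not compiled; it merely mirrors the SVMIOIOEXITINFO bitfields
     * and the table lookups above.
     */
#if 0
    bool     const fIn        = RT_BOOL(IoExitInfo.u & RT_BIT_32(0));  /* IoExitInfo.n.u1Type == SVM_IOIO_READ */
    bool     const fString    = RT_BOOL(IoExitInfo.u & RT_BIT_32(2));  /* IoExitInfo.n.u1Str */
    bool     const fRep       = RT_BOOL(IoExitInfo.u & RT_BIT_32(3));  /* IoExitInfo.n.u1Rep */
    uint32_t const fSzFlags   = (IoExitInfo.u >> 4) & 0x7;             /* 1 = 8-bit, 2 = 16-bit, 4 = 32-bit -> s_aIOSize[] */
    uint32_t const fAddrFlags = (IoExitInfo.u >> 7) & 0x7;             /* 1 = 16-bit, 2 = 32-bit, 4 = 64-bit address size */
    uint16_t const uPort      = (uint16_t)(IoExitInfo.u >> 16);        /* IoExitInfo.n.u16Port */
#endif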
7648
7649 if (RT_UNLIKELY(!cbValue))
7650 {
7651 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
7652 return VERR_EM_INTERPRETER;
7653 }
7654
7655 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
7656 VBOXSTRICTRC rcStrict;
7657 PCEMEXITREC pExitRec = NULL;
7658 if ( !pVCpu->hm.s.fSingleInstruction
7659 && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
7660 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7661 !IoExitInfo.n.u1Str
7662 ? IoExitInfo.n.u1Type == SVM_IOIO_READ
7663 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
7664 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
7665 : IoExitInfo.n.u1Type == SVM_IOIO_READ
7666 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
7667 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
7668 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7669 if (!pExitRec)
7670 {
7671 bool fUpdateRipAlready = false;
7672 if (IoExitInfo.n.u1Str)
7673 {
7674 /* INS/OUTS - I/O String instruction. */
7675 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
7676 * in EXITINFO1? Investigate once this thing is up and running. */
7677 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
7678 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
7679 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
7680 static IEMMODE const s_aenmAddrMode[8] =
7681 {
7682 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
7683 };
7684 IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
7685 if (enmAddrMode != (IEMMODE)-1)
7686 {
7687 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7688 if (cbInstr <= 15 && cbInstr >= 1)
7689 {
7690 Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
7691 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7692 {
7693 /* Don't know exactly how to detect whether u3Seg is valid, currently
7694 only enabling it for Bulldozer and later with NRIP. OS/2 broke on
7695 2384 Opterons when only checking NRIP. */
7696 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7697 if ( fSupportsNextRipSave
7698 && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
7699 {
7700 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
7701 ("u32Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
7702 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7703 IoExitInfo.n.u3Seg, true /*fIoChecked*/);
7704 }
7705 else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
7706 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7707 X86_SREG_DS, true /*fIoChecked*/);
7708 else
7709 rcStrict = IEMExecOne(pVCpu);
7710 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7711 }
7712 else
7713 {
7714 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
7715 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
7716 true /*fIoChecked*/);
7717 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7718 }
7719 }
7720 else
7721 {
7722 AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
7723 rcStrict = IEMExecOne(pVCpu);
7724 }
7725 }
7726 else
7727 {
7728 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
7729 rcStrict = IEMExecOne(pVCpu);
7730 }
7731 fUpdateRipAlready = true;
7732 if (rcStrict == VINF_IEM_RAISED_XCPT)
7733 {
7734 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7735 rcStrict = VINF_SUCCESS;
7736 }
7737 }
7738 else
7739 {
7740 /* IN/OUT - I/O instruction. */
7741 Assert(!IoExitInfo.n.u1Rep);
7742
7743 uint8_t const cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
7744 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
7745 {
7746 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
7747 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7748 && !pCtx->eflags.Bits.u1TF)
7749 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue, pCtx->eax & uAndVal);
7750 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
7751 }
7752 else
7753 {
7754 rcStrict = VERR_GCM_NOT_HANDLED;
7755 if (GCMIsInterceptingIOPortRead(pVCpu, IoExitInfo.n.u16Port, cbValue))
7756 {
7757 rcStrict = GCMInterceptedIOPortRead(pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
7758 if (rcStrict == VINF_GCM_HANDLED_ADVANCE_RIP || rcStrict == VINF_GCM_HANDLED)
7759 {
7760 fUpdateRipAlready = rcStrict != VINF_GCM_HANDLED_ADVANCE_RIP;
7761 rcStrict = VINF_SUCCESS;
7762 }
7763 else
7764 Assert(rcStrict == VERR_GCM_NOT_HANDLED);
7765 }
7766
7767 if (RT_LIKELY(rcStrict == VERR_GCM_NOT_HANDLED))
7768 {
7769 uint32_t u32Val = 0;
7770 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
7771 if (IOM_SUCCESS(rcStrict))
7772 {
7773 /* Save result of I/O IN instr. in AL/AX/EAX. */
7774 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
7775 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
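                        /*
                         * Worked sketch of the merge above, assuming the s_aIOOpAnd masks: a 1-byte IN
                         * replaces only AL (mask 0xff), a 2-byte IN only AX (0xffff) and a 4-byte IN all
                         * of EAX (0xffffffff); as the @todo notes, real hardware would also zero the
                         * high 32 bits of RAX in the 32-bit case. Not compiled.
                         */
#if 0
                        uint32_t const uEaxBefore = UINT32_C(0x11223344);
                        uint32_t const uPortData  = UINT32_C(0x000000ab);
                        uint32_t const uEaxAfter  = (uEaxBefore & ~UINT32_C(0xff)) | (uPortData & UINT32_C(0xff));
                        Assert(uEaxAfter == UINT32_C(0x112233ab));   /* only AL was replaced */
#endif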
7776 }
7777 else if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7778 && !pCtx->eflags.Bits.u1TF)
7779 rcStrict = EMRZSetPendingIoPortRead(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue);
7780 }
7781
7782 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
7783 }
7784 }
7785
7786 if (IOM_SUCCESS(rcStrict))
7787 {
7788 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
7789 if (!fUpdateRipAlready)
7790 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
7791
7792 /*
7793 * If any I/O breakpoints are armed, we need to check if one triggered
7794 * and take appropriate action.
7795 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
7796 */
7797 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
7798 * execution engines about whether hyper BPs and such are pending. */
7799 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
7800 uint32_t const uDr7 = pCtx->dr[7];
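
            /*
             * Per-breakpoint sketch of what an "armed I/O breakpoint" means, assuming the
             * architectural DR7 layout: bits 0..7 hold the L0/G0..L3/G3 enables and bits
             * 16+4n..17+4n hold the R/W type for breakpoint n, where 10b selects I/O
             * breakpoints (only defined when CR4.DE is set). Not compiled; the combined
             * X86_DR7_ENABLED_MASK / X86_DR7_ANY_RW_IO test below covers all four at once.
             */
#if 0
            unsigned const iBp       = 0;                                           /* example: breakpoint 0 */
            bool const     fEnabled  = RT_BOOL(uDr7 & (UINT32_C(3) << (iBp * 2)));  /* L0 or G0 set */
            bool const     fTypeIsIo = ((uDr7 >> (16 + iBp * 4)) & 0x3) == 0x2;     /* R/W0 = 10b = I/O */
            bool const     fArmedIo  = fEnabled && fTypeIsIo && RT_BOOL(pCtx->cr4 & X86_CR4_DE);
#endif
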
7801 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
7802 && X86_DR7_ANY_RW_IO(uDr7)
7803 && (pCtx->cr4 & X86_CR4_DE))
7804 || DBGFBpIsHwIoArmed(pVM)))
7805 {
7806 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
7807 VMMRZCallRing3Disable(pVCpu);
7808 HM_DISABLE_PREEMPT(pVCpu);
7809
7810 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
7811 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
7812
7813 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
7814 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
7815 {
7816 /* Raise #DB. */
7817 pVmcb->guest.u64DR6 = pCtx->dr[6];
7818 pVmcb->guest.u64DR7 = pCtx->dr[7];
7819 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
7820 hmR0SvmSetPendingXcptDB(pVCpu);
7821 }
7822 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
7823 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
7824 else if ( rcStrict2 != VINF_SUCCESS
7825 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
7826 rcStrict = rcStrict2;
7827 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
7828
7829 HM_RESTORE_PREEMPT();
7830 VMMRZCallRing3Enable(pVCpu);
7831 }
7832
7833 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7834 }
7835#ifdef VBOX_STRICT
7836 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
7837 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
7838 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
7839 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
7840 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
7841 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
7842 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
7843 else
7844 {
7845 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
7846 * statuses, that the VMM device and some others may return. See
7847 * IOM_SUCCESS() for guidance. */
7848 AssertMsg( RT_FAILURE(rcStrict)
7849 || rcStrict == VINF_SUCCESS
7850 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
7851 || rcStrict == VINF_EM_DBG_BREAKPOINT
7852 || rcStrict == VINF_EM_RAW_GUEST_TRAP
7853 || rcStrict == VINF_EM_DBG_STEPPED
7854 || rcStrict == VINF_EM_RAW_TO_R3
7855 || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7856 }
7857#endif
7858 }
7859 else
7860 {
7861 /*
7862 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7863 */
7864 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7865 STAM_COUNTER_INC(!IoExitInfo.n.u1Str
7866 ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
7867 : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
7868 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
7869 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
7870 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));
7871
7872 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7873 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7874
7875 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7876 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7877 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7878 }
7879 return rcStrict;
7880}
7881
7882
7883/**
7884 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
7885 */
7886HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7887{
7888 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7889 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7890 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
7891
7892 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7893 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7894 Assert(pVM->hmr0.s.fNestedPaging);
7895
7896 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
7897 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7898 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
7899 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1; /* Note! High bits in EXITINFO1 may contain additional info and are
7900 thus intentionally not copied into u32ErrCode. */
7901
7902 Log4Func(("#NPF at CS:RIP=%04x:%RX64 GCPhysFaultAddr=%RGp ErrCode=%#x cbInstrFetched=%u %.15Rhxs\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr,
7903 u32ErrCode, pVmcb->ctrl.cbInstrFetched, pVmcb->ctrl.abInstr));
7904
7905 /*
7906 * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
7907 */
7908 if ( pVM->hm.s.fTprPatchingAllowed
7909 && (GCPhysFaultAddr & GUEST_PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
7910 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
7911 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
7912 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
7913 && !CPUMIsGuestInLongModeEx(pCtx)
7914 && !CPUMGetGuestCPL(pVCpu)
7915 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7916 {
7917 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7918 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7919
7920 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
7921 {
7922 /* Only attempt to patch the instruction once. */
7923 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7924 if (!pPatch)
7925 return VINF_EM_HM_PATCH_TPR_INSTR;
7926 }
7927 }
7928
7929 /*
7930 * Determine the nested paging mode.
7931 */
7932/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
7933 PGMMODE const enmNestedPagingMode = PGMGetHostMode(pVM);
7934
7935 /*
7936 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
7937 */
7938 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
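
    /*
     * Sketch of the test below, assuming the architectural #PF error-code bits: bit 0 = P
     * (present), bit 1 = W, bit 2 = U/S, bit 3 = RSVD. MMIO pages are marked with a reserved
     * bit in the page tables (see the comment above), so accesses to them fault with both P
     * and RSVD set, which is the signature checked for here. Not compiled.
     */
#if 0
    bool const fLooksLikeMmio = RT_BOOL(u32ErrCode & X86_TRAP_PF_P)
                             && RT_BOOL(u32ErrCode & X86_TRAP_PF_RSVD);
#endif
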
7939 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
7940 {
7941 /*
7942 * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise
7943 * injecting the original pending event would most likely cause the same MMIO #NPF.
7944 */
7945 if (pVCpu->hm.s.Event.fPending)
7946 {
7947 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
7948 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7949 }
7950
7951 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
7952 VBOXSTRICTRC rcStrict;
7953 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7954 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
7955 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7956 if (!pExitRec)
7957 {
7958
7959 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, pCtx, GCPhysFaultAddr, u32ErrCode);
7960
7961 /*
7962 * If we succeed, resume guest execution.
7963 *
7964 * If we fail in interpreting the instruction because we couldn't get the guest
7965 * physical address of the page containing the instruction via the guest's page
7966 * tables (we would invalidate the guest page in the host TLB), resume execution
7967 * which would cause a guest page fault to let the guest handle this weird case.
7968 *
7969 * See @bugref{6043}.
7970 */
7971 if ( rcStrict == VINF_SUCCESS
7972 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
7973 || rcStrict == VERR_PAGE_NOT_PRESENT)
7974 {
7975 /* Successfully handled MMIO operation. */
7976 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7977 rcStrict = VINF_SUCCESS;
7978 }
7979 }
7980 else
7981 {
7982 /*
7983 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7984 */
7985 Assert(pCtx == &pVCpu->cpum.GstCtx);
7986 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7987 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
7988 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));
7989
7990 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7991 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7992
7993 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7994 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7995 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7996 }
7997 return rcStrict;
7998 }
7999
8000 /*
8001 * Nested page-fault.
8002 */
8003 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
8004 int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, pCtx, GCPhysFaultAddr);
8005 TRPMResetTrap(pVCpu);
8006
8007 Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
8008
8009 /*
8010 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
8011 */
8012 if ( rc == VINF_SUCCESS
8013 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8014 || rc == VERR_PAGE_NOT_PRESENT)
8015 {
8016 /* We've successfully synced our shadow page tables. */
8017 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8018 rc = VINF_SUCCESS;
8019 }
8020
8021 /*
8022 * If delivering an event causes an #NPF (and not MMIO), we shall resolve the fault and
8023 * re-inject the original event.
8024 */
8025 if (pVCpu->hm.s.Event.fPending)
8026 {
8027 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
8028
8029 /*
8030 * If the #NPF handler requested emulation of the instruction, ignore it.
8031 * We need to re-inject the original event so as to not lose it.
8032 * Reproducible when booting ReactOS 0.4.12 with BTRFS (installed using BootCD,
8033 * LiveCD is broken for other reasons).
8034 */
8035 if (rc == VINF_EM_RAW_EMULATE_INSTR)
8036 rc = VINF_EM_RAW_INJECT_TRPM_EVENT;
8037 }
8038
8039 return rc;
8040}
8041
8042
8043/**
8044 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
8045 * \#VMEXIT.
8046 */
8047HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8048{
8049 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8050 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8051
8052    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts; it is now ready. */
8053 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8054 hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);
8055
8056 /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
8057 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8058 return VINF_SUCCESS;
8059}
8060
8061
8062/**
8063 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
8064 * \#VMEXIT.
8065 */
8066HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8067{
8068 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8069 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8070
8071#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
8072 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8073#endif
8074
8075 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8076 if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
8077 {
8078 /*
8079 * AMD-V provides us with the exception which caused the TS; we collect
8080 * the information in the call to hmR0SvmCheckExitDueToEventDelivery().
8081 */
8082 Log4Func(("TS occurred during event delivery\n"));
8083 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8084 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8085 }
8086
8087 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8088 * emulation. */
8089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8090 return VERR_EM_INTERPRETER;
8091}
8092
8093
8094/**
8095 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
8096 */
8097HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8098{
8099 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8100 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8101
8102 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8103 if (pVM->hm.s.fTprPatchingAllowed)
8104 {
8105 int rc = hmEmulateSvmMovTpr(pVM, pVCpu);
8106 if (rc != VERR_NOT_FOUND)
8107 {
8108 Log4Func(("hmEmulateSvmMovTpr returns %Rrc\n", rc));
8109 return rc;
8110 }
8111 }
8112
8113 if (EMAreHypercallInstructionsEnabled(pVCpu))
8114 {
8115 unsigned cbInstr;
8116 if (hmR0SvmSupportsNextRipSave(pVCpu))
8117 {
8118 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8119 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8120 }
8121 else
8122 {
8123 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8124 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8125 if ( rc == VINF_SUCCESS
8126 && pDis->pCurInstr->uOpcode == OP_VMMCALL)
8127 Assert(cbInstr > 0);
8128 else
8129 cbInstr = 0;
8130 }
8131
8132 VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8133 if (RT_SUCCESS(rcStrict))
8134 {
8135 /* Only update the RIP if we're continuing guest execution and not in the case
8136 of say VINF_GIM_R3_HYPERCALL. */
8137 if (rcStrict == VINF_SUCCESS)
8138 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8139
8140 return VBOXSTRICTRC_VAL(rcStrict);
8141 }
8142 else
8143 Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));
8144 }
8145
8146 hmR0SvmSetPendingXcptUD(pVCpu);
8147 return VINF_SUCCESS;
8148}
8149
8150
8151/**
8152 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
8153 */
8154HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8155{
8156 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8157
8158 unsigned cbInstr;
8159 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8160 if (fSupportsNextRipSave)
8161 {
8162 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8163 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8164 }
8165 else
8166 {
8167 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8168 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8169 if ( rc == VINF_SUCCESS
8170 && pDis->pCurInstr->uOpcode == OP_PAUSE)
8171 Assert(cbInstr > 0);
8172 else
8173 cbInstr = 0;
8174 }
8175
8176    /** @todo The guest has likely hit a contended spinlock. We might want to
8177     * poke or schedule a different guest VCPU. */
8178 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8179 return VINF_EM_RAW_INTERRUPT;
8180}
8181
8182
8183/**
8184 * \#VMEXIT handler for FERR intercept (SVM_EXIT_FERR_FREEZE). Conditional
8185 * \#VMEXIT.
8186 */
8187HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8188{
8189 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8190 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
8191 Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));
8192
8193 Log4Func(("Raising IRQ 13 in response to #FERR\n"));
8194 return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8195}
8196
8197
8198/**
8199 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
8200 */
8201HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8202{
8203 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8204
8205 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now (almost) ready. */
8206 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8207 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);
8208
8209 /* Emulate the IRET. We have to execute the IRET before an NMI, but must potentially
8210 * deliver a pending NMI right after. If the IRET faults, an NMI can come before the
8211 * handler executes. Yes, x86 is ugly.
8212 */
8213 return VINF_EM_RAW_EMULATE_INSTR;
8214}
8215
8216
8217/**
8218 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_XCPT_14).
8219 * Conditional \#VMEXIT.
8220 */
8221HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8222{
8223 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8224 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8225 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8226
8227 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
8228 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8229 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8230 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8231 uint32_t uErrCode = pVmcb->ctrl.u64ExitInfo1;
8232 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
8233
8234#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
8235 if (pVM->hmr0.s.fNestedPaging)
8236 {
8237 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8238 if ( !pSvmTransient->fVectoringDoublePF
8239 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8240 {
8241 /* A genuine guest #PF, reflect it to the guest. */
8242 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8243 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
8244 uFaultAddress, uErrCode));
8245 }
8246 else
8247 {
8248 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8249 hmR0SvmSetPendingXcptDF(pVCpu);
8250 Log4Func(("Pending #DF due to vectoring #PF. NP\n"));
8251 }
8252 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8253 return VINF_SUCCESS;
8254 }
8255#endif
8256
8257 Assert(!pVM->hmr0.s.fNestedPaging);
8258
8259 /*
8260 * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
8261 */
8262 if ( pVM->hm.s.fTprPatchingAllowed
8263 && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
8264 && !(uErrCode & X86_TRAP_PF_P) /* Not present. */
8265 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8266 && !CPUMIsGuestInLongModeEx(pCtx)
8267 && !CPUMGetGuestCPL(pVCpu)
8268 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8269 {
8270 RTGCPHYS GCPhysApicBase;
8271 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
8272 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8273
8274 /* Check if the page at the fault-address is the APIC base. */
8275 PGMPTWALK Walk;
8276 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, &Walk);
8277 if ( rc2 == VINF_SUCCESS
8278 && Walk.GCPhys == GCPhysApicBase)
8279 {
8280 /* Only attempt to patch the instruction once. */
8281 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
8282 if (!pPatch)
8283 return VINF_EM_HM_PATCH_TPR_INSTR;
8284 }
8285 }
8286
8287 Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
8288 pCtx->rip, uErrCode, pCtx->cr3));
8289
8290 /*
8291 * If it's a vectoring #PF, emulate injecting the original event injection as
8292 * PGMTrap0eHandler() is incapable of differentiating between instruction emulation and
8293 * event injection that caused a #PF. See @bugref{6607}.
8294 */
8295 if (pSvmTransient->fVectoringPF)
8296 {
8297 Assert(pVCpu->hm.s.Event.fPending);
8298 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8299 }
8300
8301 TRPMAssertXcptPF(pVCpu, uFaultAddress, uErrCode);
8302 int rc = PGMTrap0eHandler(pVCpu, uErrCode, pCtx, (RTGCPTR)uFaultAddress);
8303
8304 Log4Func(("#PF: rc=%Rrc\n", rc));
8305
8306 if (rc == VINF_SUCCESS)
8307 {
8308        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
8309 TRPMResetTrap(pVCpu);
8310 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8311 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8312 return rc;
8313 }
8314
8315 if (rc == VINF_EM_RAW_GUEST_TRAP)
8316 {
8317 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
8318
8319 /*
8320 * If a nested-guest delivers a #PF and that causes a #PF which is -not- a shadow #PF,
8321         * we should simply forward the #PF to the guest and it is up to the nested-hypervisor to
8322 * determine whether it is a nested-shadow #PF or a #DF, see @bugref{7243#c121}.
8323 */
8324 if ( !pSvmTransient->fVectoringDoublePF
8325 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
8326 {
8327 /* It's a guest (or nested-guest) page fault and needs to be reflected. */
8328 uErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
8329 TRPMResetTrap(pVCpu);
8330
8331#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8332 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
8333 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
8334 && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
8335 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress);
8336#endif
8337
8338 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
8339 }
8340 else
8341 {
8342 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8343 TRPMResetTrap(pVCpu);
8344 hmR0SvmSetPendingXcptDF(pVCpu);
8345 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
8346 }
8347
8348 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8349 return VINF_SUCCESS;
8350 }
8351
8352 TRPMResetTrap(pVCpu);
8353 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8354 return rc;
8355}
8356
8357
8358
8359/**
8360 * \#VMEXIT handler for division overflow exceptions (SVM_EXIT_XCPT_0).
8361 * Conditional \#VMEXIT.
8362 */
8363HMSVM_EXIT_DECL hmR0SvmExitXcptDE(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8364{
8365 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8366 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8367 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8368
8369 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8370 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8371 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8372
8373 int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8374 if (pVCpu->hm.s.fGCMTrapXcptDE)
8375 {
8376 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8377 rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
8378 AssertMsg(rc == VINF_SUCCESS /* restart */ || rc == VERR_NOT_FOUND /* deliver exception */, ("rc=%Rrc\n", rc));
8379 }
8380
8381 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
8382 if (RT_FAILURE(rc))
8383 hmR0SvmSetPendingXcptDE(pVCpu);
8384
8386 return VINF_SUCCESS;
8387}
8388
8389
8390/**
8391 * \#VMEXIT handler for undefined opcode (SVM_EXIT_XCPT_6).
8392 * Conditional \#VMEXIT.
8393 */
8394HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8395{
8396 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8397 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
8399
8400 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8401 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
8402 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8403
8404 /** @todo if we accumulate more optional stuff here, we ought to combine the
8405  * reading of opcode bytes to avoid doing it more than once. */
8406
8407 VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8408 if (pVCpu->hm.s.fGIMTrapXcptUD)
8409 {
8410 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8411 uint8_t cbInstr = 0;
8412 rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
8413 if (rcStrict == VINF_SUCCESS)
8414 {
8415 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
8416 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8417 rcStrict = VINF_SUCCESS;
8418 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8419 }
8420 else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
8421 rcStrict = VINF_SUCCESS;
8422 else if (rcStrict == VINF_GIM_R3_HYPERCALL)
8423 rcStrict = VINF_GIM_R3_HYPERCALL;
8424 else
8425 {
8426 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
8427 rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
8428 }
8429 }
8430
8431 if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
8432 {
8433 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
8434 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8435 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
8436 {
8437 /* Ideally, IEM should just handle all these special #UD situations, but
8438           we don't quite trust things to behave optimally when doing that. So,
8439 for now we'll restrict ourselves to a handful of possible sysenter and
8440 sysexit encodings that we filter right here. */
8441 uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
8442 uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
8443 uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
8444 uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
8445 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
8446 if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
8447 {
8448 cbInstr = cbMin;
8449 int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
8450 AssertRCStmt(rc2, cbInstr = 0);
8451 }
8452 else
8453 memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
8454 if ( cbInstr == 0 /* read error */
8455 || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
8456 || ( uCpl == 0
8457 && ( ( cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
8458 || ( cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35 /* rex.w sysexit */
8459 && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
8460 {
8461 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
8462 | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
8463 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8464 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, abInstr, cbInstr);
8465 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
8466 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
8467 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
8468 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8469 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
8470 if (rcStrict == VINF_IEM_RAISED_XCPT)
8471 rcStrict = VINF_SUCCESS;
8472 return rcStrict;
8473 }
8474 Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
8475 }
8476 else
8477 Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8478 }
8479
8480 /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
8481 if (RT_FAILURE(rcStrict))
8482 {
8483 hmR0SvmSetPendingXcptUD(pVCpu);
8484 rcStrict = VINF_SUCCESS;
8485 }
8486
8487 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8488 return rcStrict;
8489}
8490
8491
8492/**
8493 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_XCPT_16).
8494 * Conditional \#VMEXIT.
8495 */
8496HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8497{
8498 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8499 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8500 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8501
8502 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8503 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8504
8505 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
8506 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
8507
8510 if (!(pCtx->cr0 & X86_CR0_NE))
8511 {
8512 PDISSTATE pDis = &pVCpu->hmr0.s.svm.Dis;
8513 unsigned cbInstr;
8514 int rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbInstr);
8515 if (RT_SUCCESS(rc))
8516 {
8517 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
8518 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
8519 if (RT_SUCCESS(rc))
8520 hmR0SvmAdvanceRip(pVCpu, cbInstr);
8521 }
8522 else
8523 Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
8524 return rc;
8525 }
8526
8527 hmR0SvmSetPendingXcptMF(pVCpu);
8528 return VINF_SUCCESS;
8529}
8530
8531
8532/**
8533 * \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1). Conditional
8534 * \#VMEXIT.
8535 */
8536HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8537{
8538 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8539 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8540 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8542
8543 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
8544 {
8545 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
8546 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8547 }
8548
8549 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8550
8551 /*
8552 * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data
8553 * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception
8554 * handler expects. See AMD spec. 15.12.2 "#DB (Debug)".
8555 */
8556 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8557 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
8558 int rc = DBGFTrap01Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
8559 if (rc == VINF_EM_RAW_GUEST_TRAP)
8560 {
8561 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
8562 if (CPUMIsHyperDebugStateActive(pVCpu))
8563 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
8564
8565 /* Reflect the exception back to the guest. */
8566 hmR0SvmSetPendingXcptDB(pVCpu);
8567 rc = VINF_SUCCESS;
8568 }
8569
8570 /*
8571 * Update DR6.
8572 */
8573 if (CPUMIsHyperDebugStateActive(pVCpu))
8574 {
8575 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
8576 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
8577 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
8578 }
8579 else
8580 {
8581 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
8582 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
8583 }
8584
8585 return rc;
8586}
8587
8588
8589/**
8590 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_XCPT_17).
8591 * Conditional \#VMEXIT.
8592 */
8593HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8594{
8595 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8596 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8597 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
8598
8599 SVMEVENT Event;
8600 Event.u = 0;
8601 Event.n.u1Valid = 1;
8602 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8603 Event.n.u8Vector = X86_XCPT_AC;
8604 Event.n.u1ErrorCodeValid = 1;
8605 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8606 return VINF_SUCCESS;
8607}
8608
8609
8610/**
8611 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
8612 * Conditional \#VMEXIT.
8613 */
8614HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8615{
8616 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8617 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
8618 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8619 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8620
8621 VBOXSTRICTRC rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
8622 if (rc == VINF_EM_RAW_GUEST_TRAP)
8623 {
8624 SVMEVENT Event;
8625 Event.u = 0;
8626 Event.n.u1Valid = 1;
8627 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8628 Event.n.u8Vector = X86_XCPT_BP;
8629 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8630 rc = VINF_SUCCESS;
8631 }
8632
8633 Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
8634 return rc;
8635}
8636
8637
8638/**
8639 * Hacks its way around the lovely mesa driver's backdoor accesses.
8640 *
8641 * @sa hmR0VmxHandleMesaDrvGp
8642 */
8643static int hmR0SvmHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8644{
8645 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK);
8646 Log(("hmR0SvmHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n",
8647 pVmcb->guest.CS.u16Sel, pVmcb->guest.u64RIP, pCtx->rcx, pCtx->rbx));
8648 RT_NOREF(pCtx, pVmcb);
8649
8650 /* For now we'll just skip the instruction. */
8651 hmR0SvmAdvanceRip(pVCpu, 1);
8652 return VINF_SUCCESS;
8653}
8654
8655
8656/**
8657 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
8658 * backdoor logging w/o checking what it is running inside.
8659 *
8660 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
8661 * backdoor port and magic numbers loaded in registers.
8662 *
8663 * @returns true if it is, false if it isn't.
8664 * @sa hmR0VmxIsMesaDrvGp
8665 */
8666DECLINLINE(bool) hmR0SvmIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
8667{
8668 /* Check magic and port. */
8669 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
8670 /*Log8(("hmR0SvmIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax, pCtx->rdx));*/
8671 if (pCtx->dx != UINT32_C(0x5658))
8672 return false;
8673 if ((pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax) != UINT32_C(0x564d5868))
8674 return false;
8675
8676 /* Check that it is #GP(0). */
8677 if (pVmcb->ctrl.u64ExitInfo1 != 0)
8678 return false;
8679
8680 /* Flat ring-3 CS. */
8681 /*Log8(("hmR0SvmIsMesaDrvGp: u8CPL=%d base=%RX64\n", pVmcb->guest.u8CPL, pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base));*/
8682 if (pVmcb->guest.u8CPL != 3)
8683 return false;
8684 if ((pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base) != 0)
8685 return false;
8686
8687 /* 0xed: IN eAX,dx */
8688 if (pVmcb->ctrl.cbInstrFetched < 1) /* unlikely, it turns out. */
8689 {
8690 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_GPRS_MASK
8691 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
8692 uint8_t abInstr[1];
8693 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
8694 /*Log8(("hmR0SvmIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0])); */
8695 if (RT_FAILURE(rc))
8696 return false;
8697 if (abInstr[0] != 0xed)
8698 return false;
8699 }
8700 else
8701 {
8702 /*Log8(("hmR0SvmIsMesaDrvGp: %#x\n", pVmcb->ctrl.abInstr));*/
8703 if (pVmcb->ctrl.abInstr[0] != 0xed)
8704 return false;
8705 }
8706 return true;
8707}
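
/*
 * Illustrative sketch, not part of the original file: roughly what the guest-side
 * mesa/VMware backdoor access recognized above looks like from flat ring-3.  EAX
 * carries the 0x564d5868 magic and DX the 0x5658 port checked for above; on a VM
 * without the backdoor device this IN from CPL 3 normally raises #GP(0), which is
 * what the code above screens for.  This is guest code with a hypothetical name
 * and is not compiled here.
 */
#if 0
static uint32_t sketchMesaBackdoorIn(uint32_t uCmd)
{
    uint32_t uEax = UINT32_C(0x564d5868);   /* Backdoor magic. */
    uint32_t uEbx = 0;
    uint32_t uEcx = uCmd;                   /* Backdoor command number. */
    uint32_t uEdx = UINT32_C(0x5658);       /* Backdoor I/O port in DX. */
    __asm__ __volatile__("inl %%dx, %%eax"  /* 0xed: IN eAX,DX */
                         : "+a" (uEax), "+b" (uEbx), "+c" (uEcx), "+d" (uEdx));
    return uEax;
}
#endif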
8708
8709
8710/**
8711 * \#VMEXIT handler for general protection faults (SVM_EXIT_XCPT_13).
8712 * Conditional \#VMEXIT.
8713 */
8714HMSVM_EXIT_DECL hmR0SvmExitXcptGP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8715{
8716 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8717 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8718 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8719
8720 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8721 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8722
8723 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8724 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
8725 || !hmR0SvmIsMesaDrvGp(pVCpu, pCtx, pVmcb))
8726 {
8727 SVMEVENT Event;
8728 Event.u = 0;
8729 Event.n.u1Valid = 1;
8730 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8731 Event.n.u8Vector = X86_XCPT_GP;
8732 Event.n.u1ErrorCodeValid = 1;
8733 Event.n.u32ErrorCode = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
8734 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8735 return VINF_SUCCESS;
8736 }
8737 return hmR0SvmHandleMesaDrvGp(pVCpu, pCtx, pVmcb);
8738}
8739
8740
8741/**
8742 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
8743 */
8744HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8745{
8746 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8747 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8748
8749 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8750 uint8_t const uVector = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0;
8751 uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
8752 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
8753 Assert(uVector <= X86_XCPT_LAST);
8754 Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode));
8755
8756 SVMEVENT Event;
8757 Event.u = 0;
8758 Event.n.u1Valid = 1;
8759 Event.n.u3Type = SVM_EVENT_EXCEPTION;
8760 Event.n.u8Vector = uVector;
8761 switch (uVector)
8762 {
8763 /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
8764 case X86_XCPT_PF: AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
8765 case X86_XCPT_DF:
8766 case X86_XCPT_TS:
8767 case X86_XCPT_NP:
8768 case X86_XCPT_SS:
8769 case X86_XCPT_GP:
8770 case X86_XCPT_AC:
8771 {
8772 Event.n.u1ErrorCodeValid = 1;
8773 Event.n.u32ErrorCode = uErrCode;
8774 break;
8775 }
8776 }
8777
8778#ifdef VBOX_WITH_STATISTICS
8779 switch (uVector)
8780 {
8781 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
8782 case X86_XCPT_DB: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); break;
8783 case X86_XCPT_BP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); break;
8784 case X86_XCPT_OF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
8785 case X86_XCPT_BR: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR); break;
8786 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
8787 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
8788 case X86_XCPT_DF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF); break;
8789 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); break;
8790 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
8791 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
8792 case X86_XCPT_GP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); break;
8793 case X86_XCPT_PF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); break;
8794 case X86_XCPT_MF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); break;
8795 case X86_XCPT_AC: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC); break;
8796 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
8797 default:
8798 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8799 break;
8800 }
8801#endif
8802
8803 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8804 return VINF_SUCCESS;
8805}
8806
8807
8808/**
8809 * \#VMEXIT handler for software interrupt (INTn). Conditional \#VMEXIT (debug).
8810 */
8811HMSVM_EXIT_DECL hmR0SvmExitSwInt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8812{
8813 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8814 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
8815
8816 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8817 SVMEVENT Event;
8818 Event.u = 0;
8819 Event.n.u1Valid = 1;
8820 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
8821 Event.n.u8Vector = pVmcb->ctrl.u64ExitInfo1 & 0xff; /* INTn vector, from EXITINFO1[7:0]. */
8822 Log4Func(("uVector=%#x\n", Event.n.u8Vector));
8823 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
8824 return VINF_SUCCESS;
8825}
8826
8827
8828/**
8829 * Generic exit handler that interprets the current instruction.
8830 *
8831 * Useful exit that only gets triggered by dtrace and the debugger. Caller does
8832 * the exit logging, and this function does the rest.
8833 */
8834static VBOXSTRICTRC hmR0SvmExitInterpretInstruction(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient,
8835 uint64_t fExtraImport, uint64_t fHmChanged)
8836{
8837#if 1
8838 RT_NOREF(pSvmTransient);
8839 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fExtraImport);
8840 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
8841 if (rcStrict == VINF_SUCCESS)
8842 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RIP);
8843 else
8844 {
8845 Log4Func(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
8846 if (rcStrict == VINF_IEM_RAISED_XCPT)
8847 {
8848 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK | fHmChanged);
8849 rcStrict = VINF_SUCCESS;
8850 }
8851 else
8852 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fHmChanged);
8853 }
8854 return rcStrict;
8855#else
8856 RT_NOREF(pVCpu, pSvmTransient, fExtraImport, fHmChanged);
8857 return VINF_EM_RAW_EMULATE_INSTR;
8858#endif
8859}
8860
8861
8862/**
8863 * \#VMEXIT handler for STR. Conditional \#VMEXIT (debug).
8864 */
8865HMSVM_EXIT_DECL hmR0SvmExitTrRead(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8866{
8867 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8868 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8869 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR, 0);
8870}
8871
8872
8873/**
8874 * \#VMEXIT handler for LTR. Conditional \#VMEXIT (OS/2 TLB workaround, debug).
8875 */
8876HMSVM_EXIT_DECL hmR0SvmExitTrWrite(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8877{
8878 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8879
8880 /* Workaround for lack of TLB flushing in OS/2 when returning to protected
8881 mode after a real mode call (like a BIOS call). See ticketref:20625
8882 comment 14. */
8883 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8884 if (pVM->hm.s.fMissingOS2TlbFlushWorkaround)
8885 {
8886 Log4Func(("%04x:%08RX64 TLB flush\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8887 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
8888 }
8889 else
8890 Log4Func(("%04x:%08RX64\n", pSvmTransient->pVmcb->guest.CS.u16Sel, pSvmTransient->pVmcb->guest.u64RIP));
8891
8892 return hmR0SvmExitInterpretInstruction(pVCpu, pSvmTransient, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR, HM_CHANGED_GUEST_TR);
8893}
8894
8895
8896#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
8897/**
8898 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
8899 */
8900HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8901{
8902 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8903
8904 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8905 Assert(pVmcb);
8906 Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
8907
8908 VBOXSTRICTRC rcStrict;
8909 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8910 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
8911 if (fSupportsNextRipSave)
8912 {
8913 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8914 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8915 rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
8916 }
8917 else
8918 {
8919 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8920 rcStrict = IEMExecOne(pVCpu);
8921 }
8922
8923 if (rcStrict == VINF_SUCCESS)
8924 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8925 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8926 {
8927 rcStrict = VINF_SUCCESS;
8928 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8929 }
8930 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8931 return rcStrict;
8932}
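
/*
 * Illustrative sketch, not part of the original file: how the decoded instruction
 * length is derived in the NRIP-save path used above and in the handlers below.
 * With NextRIP save the VMCB records the address of the instruction following the
 * intercepted one, so e.g. CLGI (0F 01 DD) at RIP 0x1000 yields u64NextRIP 0x1003
 * and thus cbInstr 3.  The helper name is hypothetical; the block is not compiled.
 */
#if 0
static uint8_t hmR0SvmSketchNripInstrLen(PCSVMVMCB pVmcb, PCCPUMCTX pCtx)
{
    uint64_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
    Assert(cbInstr >= 1 && cbInstr <= 15); /* x86 instructions are at most 15 bytes long. */
    return (uint8_t)cbInstr;
}
#endif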
8933
8934
8935/**
8936 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
8937 */
8938HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8939{
8940 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8941
8942 /*
8943 * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
8944 * we only intercept STGI when events are pending for GIF to become 1.
8945 */
8946 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8947 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
8948 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI);
8949
8950 VBOXSTRICTRC rcStrict;
8951 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8952 uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
8953 if (fSupportsNextRipSave)
8954 {
8955 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8956 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8957 rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
8958 }
8959 else
8960 {
8961 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
8962 rcStrict = IEMExecOne(pVCpu);
8963 }
8964
8965 if (rcStrict == VINF_SUCCESS)
8966 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
8967 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8968 {
8969 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8970 rcStrict = VINF_SUCCESS;
8971 }
8972 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
8973 return rcStrict;
8974}
8975
8976
8977/**
8978 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
8979 */
8980HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
8981{
8982 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
8983
8984 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
8985 Assert(pVmcb);
8986 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
8987
8988 VBOXSTRICTRC rcStrict;
8989 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
8990 uint64_t const fImport = CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_KERNEL_GS_BASE
8991 | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_SYSCALL_MSRS
8992 | CPUMCTX_EXTRN_SYSENTER_MSRS;
8993 if (fSupportsNextRipSave)
8994 {
8995 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
8996 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
8997 rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
8998 }
8999 else
9000 {
9001 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
9002 rcStrict = IEMExecOne(pVCpu);
9003 }
9004
9005 if (rcStrict == VINF_SUCCESS)
9006 {
9007 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS | HM_CHANGED_GUEST_GS
9008 | HM_CHANGED_GUEST_TR | HM_CHANGED_GUEST_LDTR
9009 | HM_CHANGED_GUEST_KERNEL_GS_BASE | HM_CHANGED_GUEST_SYSCALL_MSRS
9010 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
9011 }
9012 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9013 {
9014 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9015 rcStrict = VINF_SUCCESS;
9016 }
9017 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9018 return rcStrict;
9019}
9020
9021
9022/**
9023 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
9024 */
9025HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9026{
9027 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9028
9029 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9030 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
9031
9032 VBOXSTRICTRC rcStrict;
9033 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9034 if (fSupportsNextRipSave)
9035 {
9036 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9037 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9038 rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
9039 }
9040 else
9041 {
9042 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9043 rcStrict = IEMExecOne(pVCpu);
9044 }
9045
9046 if (rcStrict == VINF_IEM_RAISED_XCPT)
9047 {
9048 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9049 rcStrict = VINF_SUCCESS;
9050 }
9051 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9052 return rcStrict;
9053}
9054
9055
9056/**
9057 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
9058 */
9059HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9060{
9061 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9062
9063 VBOXSTRICTRC rcStrict;
9064 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9065 if (fSupportsNextRipSave)
9066 {
9067 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9068 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9069 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9070 rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
9071 }
9072 else
9073 {
9074 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
9075 rcStrict = IEMExecOne(pVCpu);
9076 }
9077
9078 if (rcStrict == VINF_IEM_RAISED_XCPT)
9079 {
9080 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9081 rcStrict = VINF_SUCCESS;
9082 }
9083 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9084 return rcStrict;
9085}
9086
9087
9088/**
9089 * \#VMEXIT handler for VMRUN (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
9090 */
9091HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9092{
9093 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9094 /* We shall import the entire state here just in case we enter and continue execution of
9095 the nested-guest with hardware-assisted SVM in ring-0; we would then be switching VMCBs and
9096 could lose part of the CPU state. */
9097 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
9098
9099 VBOXSTRICTRC rcStrict;
9100 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
9101 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
9102 if (fSupportsNextRipSave)
9103 {
9104 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
9105 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
9106 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
9107 }
9108 else
9109 {
9110 /* We use IEMExecOneBypassEx() here as it suppresses the attempt to continue emulating any
9111 instruction(s) when interrupt inhibition is set as part of emulating the VMRUN
9112 instruction itself, see @bugref{7243#c126}. */
9113 rcStrict = IEMExecOneBypassEx(pVCpu, NULL /* pcbWritten */);
9114 }
9115 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
9116
9117 if (rcStrict == VINF_SUCCESS)
9118 {
9119 rcStrict = VINF_SVM_VMRUN;
9120 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK);
9121 }
9122 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9123 {
9124 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9125 rcStrict = VINF_SUCCESS;
9126 }
9127 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
9128 return rcStrict;
9129}
9130
9131
9132/**
9133 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1).
9134 * Unconditional \#VMEXIT.
9135 */
9136HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9137{
9138 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9139 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9140
9141 if (pVCpu->hm.s.Event.fPending)
9142 {
9143 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
9144 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9145 }
9146
9147 hmR0SvmSetPendingXcptDB(pVCpu);
9148 return VINF_SUCCESS;
9149}
9150
9151
9152/**
9153 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
9154 * Conditional \#VMEXIT.
9155 */
9156HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
9157{
9158 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
9159 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
9160
9161 SVMEVENT Event;
9162 Event.u = 0;
9163 Event.n.u1Valid = 1;
9164 Event.n.u3Type = SVM_EVENT_EXCEPTION;
9165 Event.n.u8Vector = X86_XCPT_BP;
9166 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
9167 return VINF_SUCCESS;
9168}
9169#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
9170
9171/** @} */
9172