
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp@ 95085

Last change on this file since 95085 was 94800, checked in by vboxsync, 3 years ago

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

1/* $Id: IEMAllCImplSvmInstr.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * IEM - AMD-V (Secure Virtual Machine) instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <VBox/vmm/iem.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/hm.h>
30#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
31# include <VBox/vmm/hm_svm.h>
32#endif
33#include <VBox/vmm/gim.h>
34#include <VBox/vmm/tm.h>
35#include "IEMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/log.h>
38#include <VBox/disopcode.h> /* for OP_VMMCALL */
39#include <VBox/err.h>
40#include <VBox/param.h>
41#include <iprt/assert.h>
42#include <iprt/string.h>
43#include <iprt/x86.h>
44
45#include "IEMInline.h"
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
52/**
53 * Check the common SVM instruction preconditions.
54 */
55# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
56 do { \
57 if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
58 { \
59 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
60 return iemRaiseUndefinedOpcode(a_pVCpu); \
61 } \
62 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
63 { \
64 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
65 return iemRaiseUndefinedOpcode(a_pVCpu); \
66 } \
67 if ((a_pVCpu)->iem.s.uCpl != 0) \
68 { \
69 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
70 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
71 } \
72 } while (0)
73
74
75/**
76 * Converts an IEM exception event type to an SVM event type.
77 *
78 * @returns The SVM event type.
79 * @retval UINT8_MAX if the specified type of event isn't among the set
80 * of recognized IEM event types.
81 *
82 * @param uVector The vector of the event.
83 * @param fIemXcptFlags The IEM exception / interrupt flags.
84 */
85IEM_STATIC uint8_t iemGetSvmEventType(uint32_t uVector, uint32_t fIemXcptFlags)
86{
87 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
88 {
89 if (uVector != X86_XCPT_NMI)
90 return SVM_EVENT_EXCEPTION;
91 return SVM_EVENT_NMI;
92 }
93
94 /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */
95 if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
96 return SVM_EVENT_EXCEPTION;
97
98 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
99 return SVM_EVENT_EXTERNAL_IRQ;
100
101 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
102 return SVM_EVENT_SOFTWARE_INT;
103
104 AssertMsgFailed(("iemGetSvmEventType: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
105 return UINT8_MAX;
106}
107
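/*
 * Editor's note: illustrative sketch, not part of the original source.  The
 * 3-bit type returned by iemGetSvmEventType() above ends up in the TYPE field
 * of the VMCB's EVENTINJ/EXITINTINFO format.  The bit layout assumed below is
 * the one published in AMD's APM (vector in bits 7:0, type in bits 10:8,
 * error-code-valid in bit 11, valid in bit 31, error code in bits 63:32);
 * the structure and helper names are made up for clarity.
 */
# if 0 /* sketch only */
typedef struct SKETCHEXITINTINFO
{
    uint8_t  uVector;           /* bits  7:0                                           */
    uint8_t  uType;             /* bits 10:8 - 0=ext. INTR, 2=NMI, 3=exception, 4=INTn */
    bool     fErrorCodeValid;   /* bit  11                                             */
    bool     fValid;            /* bit  31                                             */
    uint32_t uErrorCode;        /* bits 63:32                                          */
} SKETCHEXITINTINFO;

static SKETCHEXITINTINFO sketchDecodeExitIntInfo(uint64_t uRaw)
{
    SKETCHEXITINTINFO Decoded;
    Decoded.uVector         = (uint8_t)(uRaw & 0xff);
    Decoded.uType           = (uint8_t)((uRaw >> 8) & 0x7);
    Decoded.fErrorCodeValid = RT_BOOL(uRaw & RT_BIT_64(11));
    Decoded.fValid          = RT_BOOL(uRaw & RT_BIT_64(31));
    Decoded.uErrorCode      = (uint32_t)(uRaw >> 32);
    return Decoded;
}
# endif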
108
109/**
110 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and IEM internals.
111 *
112 * @returns Strict VBox status code.
113 * @param pVCpu The cross context virtual CPU structure.
114 */
115DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu)
116{
117 /*
118 * Inform PGM about paging mode changes.
119 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
120 * see comment in iemMemPageTranslateAndCheckAccess().
121 */
122 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
123 true /* fForce */);
124 AssertRCReturn(rc, rc);
125
126 /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
127 IEMTlbInvalidateAll(pVCpu);
128
129 /* Inform CPUM (recompiler), can later be removed. */
130 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
131
132 /* Re-initialize IEM cache/state after the drastic mode switch. */
133 iemReInitExec(pVCpu);
134 return rc;
135}
136
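/*
 * Editor's note: illustrative sketch, not part of the original source.  This
 * shows the architectural decision tree PGMChangeMode() has to re-evaluate
 * from the CR0/CR4/EFER values passed in iemSvmWorldSwitch() above.  It is
 * not PGM's actual implementation; the enum and helper names are made up.
 */
# if 0 /* sketch only */
typedef enum SKETCHPGMMODE
{
    SKETCHPGMMODE_NONE,     /* CR0.PG=0: no paging (real or unpaged protected mode).  */
    SKETCHPGMMODE_32BIT,    /* Legacy 2-level 32-bit paging.                          */
    SKETCHPGMMODE_PAE,      /* PAE paging - the case that requires loading the PDPEs. */
    SKETCHPGMMODE_LONG      /* Long-mode (4-level) paging.                            */
} SKETCHPGMMODE;

static SKETCHPGMMODE sketchPagingModeFromRegs(uint64_t uCr0, uint64_t uCr4, uint64_t uEfer)
{
    if (!(uCr0 & X86_CR0_PG))
        return SKETCHPGMMODE_NONE;
    if (uEfer & MSR_K6_EFER_LMA)
        return SKETCHPGMMODE_LONG;
    if (uCr4 & X86_CR4_PAE)
        return SKETCHPGMMODE_PAE;
    return SKETCHPGMMODE_32BIT;
}
# endif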
137
138/**
139 * SVM \#VMEXIT handler.
140 *
141 * @returns Strict VBox status code.
142 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful.
143 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
144 * "host state" and a shutdown is required.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param uExitCode The exit code.
148 * @param uExitInfo1 The exit info. 1 field.
149 * @param uExitInfo2 The exit info. 2 field.
150 */
151VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT
152{
153 VBOXSTRICTRC rcStrict;
154 if ( CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
155 || uExitCode == SVM_EXIT_INVALID)
156 {
157 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n",
158 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));
159
160 /*
161 * Disable the global-interrupt flag to prevent interrupts during the 'atomic' world switch.
162 */
163 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);
164
165 /*
166 * Map the nested-guest VMCB from its location in guest memory.
167 * Write exactly what the CPU does on #VMEXIT thereby preserving most other bits in the
168 * guest's VMCB in memory, see @bugref{7243#c113} and related comment on iemSvmVmrun().
169 */
170 PSVMVMCB pVmcbMem;
171 PGMPAGEMAPLOCK PgLockMem;
172 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
173 rcStrict = iemMemPageMap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem,
174 &PgLockMem);
175 if (rcStrict == VINF_SUCCESS)
176 {
177 /*
178 * Notify HM in case the nested-guest was executed using hardware-assisted SVM, as it
179 * may have modified VMCB state that needs to be restored on #VMEXIT before we write
180 * the VMCB back to guest memory.
181 */
182 HMNotifySvmNstGstVmexit(pVCpu, IEM_GET_CTX(pVCpu));
183
184 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
188
189 /*
190 * Save the nested-guest state into the VMCB state-save area.
191 */
192 PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest;
193 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, ES, es);
194 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, CS, cs);
195 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, SS, ss);
196 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, DS, ds);
197 pVmcbMemState->GDTR.u32Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
198 pVmcbMemState->GDTR.u64Base = pVCpu->cpum.GstCtx.gdtr.pGdt;
199 pVmcbMemState->IDTR.u32Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
200 pVmcbMemState->IDTR.u64Base = pVCpu->cpum.GstCtx.idtr.pIdt;
201 pVmcbMemState->u64EFER = pVCpu->cpum.GstCtx.msrEFER;
202 pVmcbMemState->u64CR4 = pVCpu->cpum.GstCtx.cr4;
203 pVmcbMemState->u64CR3 = pVCpu->cpum.GstCtx.cr3;
204 pVmcbMemState->u64CR2 = pVCpu->cpum.GstCtx.cr2;
205 pVmcbMemState->u64CR0 = pVCpu->cpum.GstCtx.cr0;
206 /** @todo Nested paging. */
207 pVmcbMemState->u64RFlags = pVCpu->cpum.GstCtx.rflags.u64;
208 pVmcbMemState->u64RIP = pVCpu->cpum.GstCtx.rip;
209 pVmcbMemState->u64RSP = pVCpu->cpum.GstCtx.rsp;
210 pVmcbMemState->u64RAX = pVCpu->cpum.GstCtx.rax;
211 pVmcbMemState->u64DR7 = pVCpu->cpum.GstCtx.dr[7];
212 pVmcbMemState->u64DR6 = pVCpu->cpum.GstCtx.dr[6];
213 pVmcbMemState->u8CPL = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */
214 Assert(CPUMGetGuestCPL(pVCpu) == pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl);
215 if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, IEM_GET_CTX(pVCpu)))
216 pVmcbMemState->u64PAT = pVCpu->cpum.GstCtx.msrPAT;
217
218 /*
219 * Save additional state and intercept information.
220 *
221 * - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
222 * - V_TPR: Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted
223 * SVM execution.
224 * - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
225 */
226 PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
227 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) /* V_IRQ. */
228 pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
229 else
230 {
231 Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
232 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
233 }
234
235 pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR; /* V_TPR. */
236
237 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadow. */
238 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
239 {
240 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
241 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
242 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip));
243 }
244 else
245 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
246
247 /*
248 * Save nRIP, instruction length and byte fields.
249 */
250 pVmcbMemCtrl->u64NextRIP = pVmcbCtrl->u64NextRIP;
251 pVmcbMemCtrl->cbInstrFetched = pVmcbCtrl->cbInstrFetched;
252 memcpy(&pVmcbMemCtrl->abInstr[0], &pVmcbCtrl->abInstr[0], sizeof(pVmcbMemCtrl->abInstr));
253
254 /*
255 * Save exit information.
256 */
257 pVmcbMemCtrl->u64ExitCode = uExitCode;
258 pVmcbMemCtrl->u64ExitInfo1 = uExitInfo1;
259 pVmcbMemCtrl->u64ExitInfo2 = uExitInfo2;
260
261 /*
262 * Update the exit interrupt-information field if this #VMEXIT happened as a result
263 * of delivering an event through IEM.
264 *
265 * Don't update the exit interrupt-information field if the event wasn't being injected
266 * through IEM, as it would have been updated by real hardware if the nested-guest was
267 * executed using hardware-assisted SVM.
268 */
269 {
270 uint8_t uExitIntVector;
271 uint32_t uExitIntErr;
272 uint32_t fExitIntFlags;
273 bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
274 NULL /* uExitIntCr2 */);
275 if (fRaisingEvent)
276 {
277 pVmcbCtrl->ExitIntInfo.n.u1Valid = 1;
278 pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
279 pVmcbCtrl->ExitIntInfo.n.u3Type = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
280 if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
281 {
282 pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
283 pVmcbCtrl->ExitIntInfo.n.u32ErrorCode = uExitIntErr;
284 }
285 }
286 }
287
288 /*
289 * Save the exit interrupt-information field.
290 *
291 * We write the whole field including overwriting reserved bits as it was observed on an
292 * AMD Ryzen 5 Pro 1500 that the CPU does not preserve reserved bits in EXITINTINFO.
293 */
294 pVmcbMemCtrl->ExitIntInfo = pVmcbCtrl->ExitIntInfo;
295
296 /*
297 * Clear event injection.
298 */
299 pVmcbMemCtrl->EventInject.n.u1Valid = 0;
300
301 iemMemPageUnmap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);
302 }
303
304 /*
305 * Prepare for guest's "host mode" by clearing internal processor state bits.
306 *
307 * We don't need to zero out the state-save area; clearing just the controls is
308 * sufficient because they hold the critical bit indicating whether we're inside
309 * the nested-guest or not.
310 */
311 memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
312 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
313
314 /*
315 * Restore the subset of force-flags that were preserved.
316 */
317 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
318 {
319 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
320 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
321 }
322
323 if (rcStrict == VINF_SUCCESS)
324 {
325 /** @todo Nested paging. */
326 /** @todo ASID. */
327
328 /*
329 * If we are switching to PAE mode host, validate the PDPEs first.
330 * Any invalid PDPEs here causes a VCPU shutdown.
331 */
332 PCSVMHOSTSTATE pHostState = &pVCpu->cpum.GstCtx.hwvirt.svm.HostState;
333 bool const fHostInPaeMode = CPUMIsPaePagingEnabled(pHostState->uCr0, pHostState->uCr4, pHostState->uEferMsr);
334 if (fHostInPaeMode)
335 rcStrict = PGMGstMapPaePdpesAtCr3(pVCpu, pHostState->uCr3);
336 if (RT_SUCCESS(rcStrict))
337 {
338 /*
339 * Reload the host state.
340 */
341 CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));
342
343 /*
344 * Update PGM, IEM and others of a world-switch.
345 */
346 rcStrict = iemSvmWorldSwitch(pVCpu);
347 if (rcStrict == VINF_SUCCESS)
348 rcStrict = VINF_SVM_VMEXIT;
349 else if (RT_SUCCESS(rcStrict))
350 {
351 LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
352 iemSetPassUpStatus(pVCpu, rcStrict);
353 rcStrict = VINF_SVM_VMEXIT;
354 }
355 else
356 LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
357 }
358 else
359 {
360 Log(("iemSvmVmexit: PAE PDPEs invalid while restoring host state. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
361 rcStrict = VINF_EM_TRIPLE_FAULT;
362 }
363 }
364 else
365 {
366 AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
367 rcStrict = VINF_EM_TRIPLE_FAULT;
368 }
369 }
370 else
371 {
372 AssertMsgFailed(("iemSvmVmexit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
373 rcStrict = VERR_SVM_IPE_3;
374 }
375
376# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
377 /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
378 if ( HMIsEnabled(pVCpu->CTX_SUFF(pVM))
379 && HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)))
380 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
381# endif
382 return rcStrict;
383}
384
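/*
 * Editor's note: illustrative sketch, not part of the original source.  Once
 * iemSvmVmexit() has written EXITCODE/EXITINFO1/EXITINFO2 back into the
 * nested hypervisor's VMCB and restored its host state, that hypervisor
 * resumes after its VMRUN and typically dispatches on the exit code roughly
 * like this.  The SVM_EXIT_* names come from VBox/vmm/hm_svm.h (included
 * above); the helper and the dispatch policy are hypothetical.
 */
# if 0 /* sketch only */
static void sketchNestedHypervisorDispatch(SVMVMCB const *pVmcb)
{
    uint64_t const uExitCode = pVmcb->ctrl.u64ExitCode;
    if (uExitCode == SVM_EXIT_INVALID)
    {
        /* Consistency checks failed and the VMCB was rejected; fix it or give up. */
    }
    else if (uExitCode == SVM_EXIT_MSR)
    {
        /* EXITINFO1: 0 = RDMSR, 1 = WRMSR (see iemSvmHandleMsrIntercept below). */
    }
    else if (uExitCode == SVM_EXIT_IOIO)
    {
        /* EXITINFO1: packed SVMIOIOEXITINFO, EXITINFO2: RIP of the next instruction. */
    }
    else if (   uExitCode >= SVM_EXIT_XCPT_0
             && uExitCode <= SVM_EXIT_XCPT_0 + X86_XCPT_LAST)
    {
        /* Intercepted exception: reflect it into the guest or handle it. */
    }
}
# endif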
385
386/**
387 * Interface for HM and EM to emulate \#VMEXIT.
388 *
389 * @returns Strict VBox status code.
390 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
391 * @param uExitCode The exit code.
392 * @param uExitInfo1 The exit info. 1 field.
393 * @param uExitInfo2 The exit info. 2 field.
394 * @thread EMT(pVCpu)
395 */
396VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
397{
398 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
399 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
400 if (pVCpu->iem.s.cActiveMappings)
401 iemMemRollback(pVCpu);
402 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
403}
404
405
406/**
407 * Performs the operations that are part of VMRUN instruction execution in
408 * the guest.
409 *
410 * @returns Strict VBox status code (i.e. informational status codes too).
411 * @retval VINF_SUCCESS successfully executed VMRUN and entered nested-guest
412 * code execution.
413 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
414 * (SVM_EXIT_INVALID most likely).
415 *
416 * @param pVCpu The cross context virtual CPU structure.
417 * @param cbInstr The length of the VMRUN instruction.
418 * @param GCPhysVmcb Guest physical address of the VMCB to run.
419 */
420static VBOXSTRICTRC iemSvmVmrun(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPHYS GCPhysVmcb) RT_NOEXCEPT
421{
422 LogFlow(("iemSvmVmrun\n"));
423
424 /*
425 * Cache the physical address of the VMCB for #VMEXIT exceptions.
426 */
427 pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
428
429 /*
430 * Save the host state.
431 */
432 CPUMSvmVmRunSaveHostState(IEM_GET_CTX(pVCpu), cbInstr);
433
434 /*
435 * Read the guest VMCB.
436 */
437 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
438 int rc = PGMPhysSimpleReadGCPhys(pVM, &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb, GCPhysVmcb, sizeof(SVMVMCB));
439 if (RT_SUCCESS(rc))
440 {
441 /*
442 * AMD-V seems to preserve reserved fields and only writes back selected, recognized
443 * fields on #VMEXIT. However, not all reserved bits are preserved (e.g, EXITINTINFO)
444 * but in our implementation we try to preserve as much as we possibly can.
445 *
446 * We could read the entire page here and only write back the relevant fields on
447 * #VMEXIT but since our internal VMCB is also being used by HM during hardware-assisted
448 * SVM execution, it creates a potential for a nested-hypervisor to set bits that are
449 * currently reserved but may be recognized as feature bits in future CPUs causing
450 * unexpected & undesired results. Hence, we zero out unrecognized fields here as we
451 * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}.
452 */
453 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
454 PSVMVMCBSTATESAVE pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.guest;
455
456 RT_ZERO(pVmcbCtrl->u8Reserved0);
457 RT_ZERO(pVmcbCtrl->u8Reserved1);
458 RT_ZERO(pVmcbCtrl->u8Reserved2);
459 RT_ZERO(pVmcbNstGst->u8Reserved0);
460 RT_ZERO(pVmcbNstGst->u8Reserved1);
461 RT_ZERO(pVmcbNstGst->u8Reserved2);
462 RT_ZERO(pVmcbNstGst->u8Reserved3);
463 RT_ZERO(pVmcbNstGst->u8Reserved4);
464 RT_ZERO(pVmcbNstGst->u8Reserved5);
465 pVmcbCtrl->u32Reserved0 = 0;
466 pVmcbCtrl->TLBCtrl.n.u24Reserved = 0;
467 pVmcbCtrl->IntCtrl.n.u6Reserved = 0;
468 pVmcbCtrl->IntCtrl.n.u3Reserved = 0;
469 pVmcbCtrl->IntCtrl.n.u5Reserved = 0;
470 pVmcbCtrl->IntCtrl.n.u24Reserved = 0;
471 pVmcbCtrl->IntShadow.n.u30Reserved = 0;
472 pVmcbCtrl->ExitIntInfo.n.u19Reserved = 0;
473 pVmcbCtrl->NestedPagingCtrl.n.u29Reserved = 0;
474 pVmcbCtrl->EventInject.n.u19Reserved = 0;
475 pVmcbCtrl->LbrVirt.n.u30Reserved = 0;
476
477 /*
478 * Validate guest-state and controls.
479 */
480 /* VMRUN must always be intercepted. */
481 if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_VMRUN))
482 {
483 Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
484 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
485 }
486
487 /* Nested paging. */
488 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
489 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
490 {
491 Log(("iemSvmVmrun: Nested paging not supported -> Disabling\n"));
492 pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = 0;
493 }
494
495 /* AVIC. */
496 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
497 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
498 {
499 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
500 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
501 }
502
503 /* Last branch record (LBR) virtualization. */
504 if ( pVmcbCtrl->LbrVirt.n.u1LbrVirt
505 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
506 {
507 Log(("iemSvmVmrun: LBR virtualization not supported -> Disabling\n"));
508 pVmcbCtrl->LbrVirt.n.u1LbrVirt = 0;
509 }
510
511 /* Virtualized VMSAVE/VMLOAD. */
512 if ( pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload
513 && !pVM->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
514 {
515 Log(("iemSvmVmrun: Virtualized VMSAVE/VMLOAD not supported -> Disabling\n"));
516 pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = 0;
517 }
518
519 /* Virtual GIF. */
520 if ( pVmcbCtrl->IntCtrl.n.u1VGifEnable
521 && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
522 {
523 Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
524 pVmcbCtrl->IntCtrl.n.u1VGifEnable = 0;
525 }
526
527 /* Guest ASID. */
528 if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
529 {
530 Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
531 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
532 }
533
534 /* Guest AVIC. */
535 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
536 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
537 {
538 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
539 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
540 }
541
542 /* Guest Secure Encrypted Virtualization. */
543 if ( ( pVmcbCtrl->NestedPagingCtrl.n.u1Sev
544 || pVmcbCtrl->NestedPagingCtrl.n.u1SevEs)
545 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
546 {
547 Log(("iemSvmVmrun: SEV not supported -> Disabling\n"));
548 pVmcbCtrl->NestedPagingCtrl.n.u1Sev = 0;
549 pVmcbCtrl->NestedPagingCtrl.n.u1SevEs = 0;
550 }
551
552 /* Flush by ASID. */
553 if ( !pVM->cpum.ro.GuestFeatures.fSvmFlusbByAsid
554 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING
555 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_ENTIRE)
556 {
557 Log(("iemSvmVmrun: Flush-by-ASID not supported -> #VMEXIT\n"));
558 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
559 }
560
561 /* IO permission bitmap. */
562 RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
563 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
564 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
565 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
566 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
567 {
568 Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
569 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
570 }
571
572 /* MSR permission bitmap. */
573 RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
574 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
575 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
576 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
577 {
578 Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
579 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
580 }
581
582 /* CR0. */
583 if ( !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
584 && (pVmcbNstGst->u64CR0 & X86_CR0_NW))
585 {
586 Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
587 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
588 }
589 if (pVmcbNstGst->u64CR0 >> 32)
590 {
591 Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
592 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
593 }
594 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
595
596 /* DR6 and DR7. */
597 if ( pVmcbNstGst->u64DR6 >> 32
598 || pVmcbNstGst->u64DR7 >> 32)
599 {
600 Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
601 pVmcbNstGst->u64DR7));
602 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
603 }
604
605 /*
606 * PAT (Page Attribute Table) MSR.
607 *
608 * The CPU only validates and loads it when nested-paging is enabled.
609 * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
610 */
611 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
612 && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
613 {
614 Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
615 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
616 }
617
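/*
 * Editor's note: illustrative sketch, not part of the original source.  This
 * spells out the architectural rule CPUMIsPatMsrValid() enforces above: each
 * of the eight 8-bit PAT entries must encode a defined memory type (0=UC,
 * 1=WC, 4=WT, 5=WP, 6=WB, 7=UC-) with its reserved upper bits clear.  The
 * local variable names are made up.
 */
# if 0 /* sketch only */
        {
            uint64_t uPatSketch = pVmcbNstGst->u64PAT;
            bool     fPatValid  = true;
            for (unsigned iEntry = 0; iEntry < 8; iEntry++, uPatSketch >>= 8)
            {
                uint8_t const uMemType = (uint8_t)(uPatSketch & 0x7);
                if (   uMemType == 2                    /* 2 and 3 are the only reserved */
                    || uMemType == 3                    /* low-3-bit encodings.          */
                    || (uPatSketch & 0xf8))             /* Bits 7:3 of each entry are reserved. */
                    fPatValid = false;
            }
        }
# endif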
618 /*
619 * Copy the IO permission bitmap into the cache.
620 */
621 AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap) == SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
622 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, GCPhysIOBitmap,
623 sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap));
624 if (RT_FAILURE(rc))
625 {
626 Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
627 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
628 }
629
630 /*
631 * Copy the MSR permission bitmap into the cache.
632 */
633 AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
634 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap, GCPhysMsrBitmap,
635 sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap));
636 if (RT_FAILURE(rc))
637 {
638 Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
639 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
640 }
641
642 /*
643 * Copy segments from nested-guest VMCB state to the guest-CPU state.
644 *
645 * We do this here as we need to use the CS attributes and it's easier this way
646 * than using the VMCB format selectors. It doesn't really matter where we copy
647 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
648 */
649 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, ES, es);
650 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, CS, cs);
651 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, SS, ss);
652 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, DS, ds);
653
654 /** @todo Segment attribute overrides by VMRUN. */
655
656 /*
657 * CPL adjustments and overrides.
658 *
659 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
660 * We shall thus adjust both CS.DPL and SS.DPL here.
661 */
662 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
663 if (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(pVCpu)))
664 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 3;
665 if (CPUMIsGuestInRealModeEx(IEM_GET_CTX(pVCpu)))
666 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 0;
667 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
668
669 /*
670 * Continue validating guest-state and controls.
671 *
672 * We pass CR0 as 0 to CPUMIsGuestEferMsrWriteValid() below to skip the illegal
673 * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
674 * old and new EFER value to not have any guest EFER bits influence the new
675 * nested-guest EFER.
676 */
677 uint64_t uValidEfer;
678 rc = CPUMIsGuestEferMsrWriteValid(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
679 if (RT_FAILURE(rc))
680 {
681 Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
682 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
683 }
684
685 /* Validate paging and CPU mode bits. */
686 bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
687 bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
688 bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
689 bool const fPaging = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
690 bool const fPae = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
691 bool const fProtMode = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
692 bool const fLongModeWithPaging = fLongModeEnabled && fPaging;
693 bool const fLongModeConformCS = pVCpu->cpum.GstCtx.cs.Attr.n.u1Long && pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig;
694 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
695 if (fLongModeWithPaging)
696 uValidEfer |= MSR_K6_EFER_LMA;
697 bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
698 if ( !fSvm
699 || (!fLongModeSupported && fLongModeActiveOrEnabled)
700 || (fLongModeWithPaging && !fPae)
701 || (fLongModeWithPaging && !fProtMode)
702 || ( fLongModeEnabled
703 && fPaging
704 && fPae
705 && fLongModeConformCS))
706 {
707 Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
708 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
709 }
710
711 /*
712 * Preserve the required force-flags.
713 *
714 * We only preserve the force-flags that would affect the execution of the
715 * nested-guest (or the guest).
716 *
717 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
718 * execution of a subsequent IRET instruction in the guest.
719 *
720 * The remaining FFs (e.g. timers) can stay in place so that we will be able to
721 * generate interrupts that should cause #VMEXITs for the nested-guest.
722 *
723 * VMRUN has implicit GIF (Global Interrupt Flag) handling, we don't need to
724 * preserve VMCPU_FF_INHIBIT_INTERRUPTS.
725 */
726 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
727 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
728
729 /*
730 * Pause filter.
731 */
732 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilter)
733 {
734 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount;
735 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilterThreshold)
736 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterCount;
737 }
738
739 /*
740 * Interrupt shadow.
741 */
742 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
743 {
744 LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
745 /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
746 EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
747 }
748
749 /*
750 * TLB flush control.
751 * Currently disabled since it's redundant as we unconditionally flush the TLB
752 * in iemSvmWorldSwitch() below.
753 */
754# if 0
755 /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
756 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
757 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
758 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
759 PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
760# endif
761
762 /*
763 * Validate and map PAE PDPEs if the guest will be using PAE paging.
764 * Invalid PAE PDPEs here causes a #VMEXIT.
765 */
766 if ( !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
767 && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
768 {
769 rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
770 if (RT_SUCCESS(rc))
771 { /* likely */ }
772 else
773 {
774 Log(("iemSvmVmrun: PAE PDPEs invalid -> #VMEXIT\n"));
775 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
776 }
777 }
778
779 /*
780 * Copy the remaining guest state from the VMCB to the guest-CPU context.
781 */
782 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
783 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcbNstGst->GDTR.u64Base;
784 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
785 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcbNstGst->IDTR.u64Base;
786 CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0);
787 CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4);
788 pVCpu->cpum.GstCtx.cr3 = pVmcbNstGst->u64CR3;
789 pVCpu->cpum.GstCtx.cr2 = pVmcbNstGst->u64CR2;
790 pVCpu->cpum.GstCtx.dr[6] = pVmcbNstGst->u64DR6;
791 pVCpu->cpum.GstCtx.dr[7] = pVmcbNstGst->u64DR7;
792 pVCpu->cpum.GstCtx.rflags.u64 = pVmcbNstGst->u64RFlags;
793 pVCpu->cpum.GstCtx.rax = pVmcbNstGst->u64RAX;
794 pVCpu->cpum.GstCtx.rsp = pVmcbNstGst->u64RSP;
795 pVCpu->cpum.GstCtx.rip = pVmcbNstGst->u64RIP;
796 CPUMSetGuestEferMsrNoChecks(pVCpu, pVCpu->cpum.GstCtx.msrEFER, uValidEfer);
797 if (pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging)
798 pVCpu->cpum.GstCtx.msrPAT = pVmcbNstGst->u64PAT;
799
800 /* Mask DR6, DR7 bits mandatory set/clear bits. */
801 pVCpu->cpum.GstCtx.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
802 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_RA1_MASK;
803 pVCpu->cpum.GstCtx.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
804 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
805
806 /*
807 * Check for pending virtual interrupts.
808 */
809 if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
810 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
811 else
812 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
813
814 /*
815 * Update PGM, IEM and others of a world-switch.
816 */
817 VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu);
818 if (rcStrict == VINF_SUCCESS)
819 { /* likely */ }
820 else if (RT_SUCCESS(rcStrict))
821 {
822 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
823 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
824 }
825 else
826 {
827 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
828 return rcStrict;
829 }
830
831 /*
832 * Set the global-interrupt flag to allow interrupts in the guest.
833 */
834 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);
835
836 /*
837 * Event injection.
838 */
839 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
840 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
841 if (pEventInject->n.u1Valid)
842 {
843 uint8_t const uVector = pEventInject->n.u8Vector;
844 TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEventInject, uVector);
845 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
846
847 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
848 if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
849 {
850 Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
851 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
852 }
853 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
854 {
855 if ( uVector == X86_XCPT_NMI
856 || uVector > X86_XCPT_LAST)
857 {
858 Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
859 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
860 }
861 if ( uVector == X86_XCPT_BR
862 && CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
863 {
864 Log(("iemSvmVmrun: Cannot inject #BR when in long mode -> #VMEXIT\n"));
865 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
866 }
867 /** @todo any others? */
868 }
869
870 /*
871 * Invalidate the exit interrupt-information field here. This field is fully updated
872 * on #VMEXIT as events other than the one below can also cause intercepts during
873 * their injection (e.g. exceptions).
874 */
875 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
876
877 /*
878 * Clear the event injection valid bit here. While the AMD spec. mentions that the CPU
879 * clears this bit from the VMCB unconditionally on #VMEXIT, internally the CPU could be
880 * clearing it at any time, most likely before/after injecting the event. Since VirtualBox
881 * doesn't have any virtual-CPU internal representation of this bit, we clear/update the
882 * VMCB here. This also has the added benefit that we avoid the risk of injecting the event
883 * twice if we fall back to executing the nested-guest using hardware-assisted SVM after
884 * injecting the event through IEM here.
885 */
886 pVmcbCtrl->EventInject.n.u1Valid = 0;
887
888 /** @todo NRIP: Software interrupts can only be pushed properly if we support
889 * NRIP for the nested-guest to calculate the instruction length
890 * below. */
891 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n",
892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2,
893 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));
894
895 /*
896 * We shall not inject the event here right away. There may be paging mode related updates
897 * as a result of the world-switch above that are yet to be honored. Instead flag the event
898 * as pending for injection.
899 */
900 TRPMAssertTrap(pVCpu, uVector, enmType);
901 if (pEventInject->n.u1ErrorCodeValid)
902 TRPMSetErrorCode(pVCpu, uErrorCode);
903 if ( enmType == TRPM_TRAP
904 && uVector == X86_XCPT_PF)
905 TRPMSetFaultAddress(pVCpu, pVCpu->cpum.GstCtx.cr2);
906 }
907 else
908 LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
909 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3,
910 pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64));
911
912 LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
913
914# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
915 /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
916 if ( HMIsEnabled(pVM)
917 && HMIsSvmVGifActive(pVM))
918 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
919# endif
920
921 return rcStrict;
922 }
923
924 /* Shouldn't really happen as the caller should've validated the physical address already. */
925 Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
926 return rc;
927}
928
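/*
 * Editor's note: illustrative sketch, not part of the original source.  It
 * summarizes what the consistency checks in iemSvmVmrun() above require of a
 * nested hypervisor at a minimum: VMRUN itself must be intercepted, the guest
 * ASID must be non-zero, and the IO/MSR permission bitmap addresses must be
 * page aligned and backed by normal RAM.  Field names are assumed to match
 * VBox/vmm/hm_svm.h; the helper and its parameters are hypothetical.
 */
# if 0 /* sketch only */
static void sketchMinimalVmcbCtrlSetup(SVMVMCB *pVmcb, RTGCPHYS GCPhysIopm, RTGCPHYS GCPhysMsrpm)
{
    Assert(!(GCPhysIopm  & X86_PAGE_4K_OFFSET_MASK));
    Assert(!(GCPhysMsrpm & X86_PAGE_4K_OFFSET_MASK));

    pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMRUN;   /* Mandatory; checked first.  */
    pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;                          /* Any non-zero ASID will do. */
    pVmcb->ctrl.u64IOPMPhysAddr   = GCPhysIopm;                 /* SVM_IOPM_PAGES  (3) pages. */
    pVmcb->ctrl.u64MSRPMPhysAddr  = GCPhysMsrpm;                /* SVM_MSRPM_PAGES (2) pages. */
}
# endif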
929
930/**
931 * Checks if the event is intercepted and performs the \#VMEXIT if the corresponding
932 * intercept is active.
933 *
934 * @returns Strict VBox status code.
935 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
936 * we're not executing a nested-guest.
937 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
938 * successfully.
939 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
940 * failed and a shutdown needs to be initiated for the guest.
941 *
943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
944 * @param u8Vector The interrupt or exception vector.
945 * @param fFlags The exception flags (see IEM_XCPT_FLAGS_XXX).
946 * @param uErr The error-code associated with the exception.
947 * @param uCr2 The CR2 value in case of a \#PF exception.
948 */
949VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT
950{
951 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
952
953 /*
954 * Handle SVM exception and software interrupt intercepts, see AMD spec. 15.12 "Exception Intercepts".
955 *
956 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_XCPT_2 #VMEXITs.
957 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
958 * even when they use a vector in the range 0 to 31.
959 * - ICEBP should not trigger #DB intercept, but its own intercept.
960 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
961 */
962 /* Check NMI intercept */
963 if ( u8Vector == X86_XCPT_NMI
964 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
965 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
966 {
967 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
968 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
969 }
970
971 /* Check ICEBP intercept. */
972 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
973 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
974 {
975 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
976 IEM_SVM_UPDATE_NRIP(pVCpu);
977 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
978 }
979
980 /* Check CPU exception intercepts. */
981 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
982 && IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
983 {
984 Assert(u8Vector <= X86_XCPT_LAST);
985 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
986 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
987 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists
988 && u8Vector == X86_XCPT_PF
989 && !(uErr & X86_TRAP_PF_ID))
990 {
991 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
992# ifdef IEM_WITH_CODE_TLB
993 uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
994 uint8_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
995 pVmcbCtrl->cbInstrFetched = RT_MIN(cbInstrBuf, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
996 if ( pbInstrBuf
997 && cbInstrBuf > 0)
998 memcpy(&pVmcbCtrl->abInstr[0], pbInstrBuf, pVmcbCtrl->cbInstrFetched);
999# else
1000 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1001 pVmcbCtrl->cbInstrFetched = RT_MIN(cbOpcode, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
1002 if (cbOpcode > 0)
1003 memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[0], pVmcbCtrl->cbInstrFetched);
1004# endif
1005 }
1006 if (u8Vector == X86_XCPT_BR)
1007 IEM_SVM_UPDATE_NRIP(pVCpu);
1008 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
1009 "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt,
1010 u8Vector, uExitInfo1, uExitInfo2));
1011 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
1012 }
1013
1014 /* Check software interrupt (INTn) intercepts. */
1015 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
1016 | IEM_XCPT_FLAGS_BP_INSTR
1017 | IEM_XCPT_FLAGS_ICEBP_INSTR
1018 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1019 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
1020 {
1021 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
1022 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
1023 IEM_SVM_UPDATE_NRIP(pVCpu);
1024 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
1025 }
1026
1027 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1028}
1029
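/*
 * Editor's note: illustrative sketch, not part of the original source.  The
 * CPU-exception path above boils down to this mapping: bit N of the 32-bit
 * exception intercept mask guards vector N, and the reported exit code is
 * simply SVM_EXIT_XCPT_0 + N.  The helper name and parameters are made up.
 */
# if 0 /* sketch only */
static bool sketchIsXcptIntercepted(uint32_t u32InterceptXcpt, uint8_t u8Vector, uint64_t *puExitCode)
{
    Assert(u8Vector <= X86_XCPT_LAST);
    if (u32InterceptXcpt & RT_BIT_32(u8Vector))
    {
        *puExitCode = SVM_EXIT_XCPT_0 + u8Vector;   /* e.g. #PF (vector 14) -> SVM_EXIT_XCPT_14. */
        return true;
    }
    return false;
}
# endif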
1030
1031/**
1032 * Checks the SVM IO permission bitmap and performs the \#VMEXIT if the
1033 * corresponding intercept is active.
1034 *
1035 * @returns Strict VBox status code.
1036 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
1037 * we're not executing a nested-guest.
1038 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
1039 * successfully.
1040 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
1041 * failed and a shutdown needs to be initiated for the guest.
1042 *
1044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1045 * @param u16Port The IO port being accessed.
1046 * @param enmIoType The type of IO access.
1047 * @param cbReg The IO operand size in bytes.
1048 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
1049 * @param iEffSeg The effective segment number.
1050 * @param fRep Whether this is a repeating IO instruction (REP prefix).
1051 * @param fStrIo Whether this is a string IO instruction.
1052 * @param cbInstr The length of the IO instruction in bytes.
1053 */
1054VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1055 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT
1056{
1057 Assert(IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
1058 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
1059 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
1060
1061 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));
1062
1063 SVMIOIOEXITINFO IoExitInfo;
1064 bool const fIntercept = CPUMIsSvmIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, u16Port, enmIoType, cbReg,
1065 cAddrSizeBits, iEffSeg, fRep, fStrIo, &IoExitInfo);
1066 if (fIntercept)
1067 {
1068 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
1069 IEM_SVM_UPDATE_NRIP(pVCpu);
1070 return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
1071 }
1072
1073 /** @todo remove later (for debugging as VirtualBox always traps all IO
1074 * intercepts). */
1075 AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
1076 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1077}
1078
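/*
 * Editor's note: illustrative sketch, not part of the original source.  This
 * is the architectural IOPM lookup CPUMIsSvmIoInterceptSet() performs on the
 * 12 KB bitmap VMRUN cached earlier: one bit per IO port, and an access of
 * cbReg bytes is intercepted if the bit of any touched port is set.  It is
 * the rule, not CPUM's actual implementation; the helper name is made up.
 */
# if 0 /* sketch only */
static bool sketchIsIoPortIntercepted(uint8_t const *pbIopm, uint16_t u16Port, uint8_t cbReg)
{
    for (uint32_t uPort = u16Port; uPort < (uint32_t)u16Port + cbReg; uPort++)
        if (pbIopm[uPort / 8] & RT_BIT_32(uPort & 7))
            return true;
    return false;
}
# endif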
1079
1080/**
1081 * Checks the SVM MSR permission bitmap and performs the \#VMEXIT if the
1082 * corresponding intercept is active.
1083 *
1084 * @returns Strict VBox status code.
1085 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
1086 * specify interception of the accessed MSR @a idMsr.
1087 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
1088 * successfully.
1089 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
1090 * failed and a shutdown needs to be initiated for the guest.
1091 *
1092 * @param pVCpu The cross context virtual CPU structure.
1093 * @param idMsr The MSR being accessed in the nested-guest.
1094 * @param fWrite Whether this is an MSR write access, @c false implies an
1095 * MSR read.
1097 */
1098VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite) RT_NOEXCEPT
1099{
1100 /*
1101 * Check if any MSRs are being intercepted.
1102 */
1103 Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_MSR_PROT));
1104 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1105
1106 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
1107
1108 /*
1109 * Get the byte and bit offset of the permission bits corresponding to the MSR.
1110 */
1111 uint16_t offMsrpm;
1112 uint8_t uMsrpmBit;
1113 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
1114 if (RT_SUCCESS(rc))
1115 {
1116 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
1117 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1118 if (fWrite)
1119 ++uMsrpmBit;
1120
1121 /*
1122 * Check if the bit is set, if so, trigger a #VMEXIT.
1123 */
1124 if (pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit))
1125 {
1126 IEM_SVM_UPDATE_NRIP(pVCpu);
1127 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1128 }
1129 }
1130 else
1131 {
1132 /*
1133 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (nested hypervisor) deal with it.
1134 */
1135 Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
1136 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1137 }
1138 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1139}
1140
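/*
 * Editor's note: illustrative sketch, not part of the original source.  This
 * is the MSRPM layout CPUMGetSvmMsrpmOffsetAndBit() resolves above, as AMD
 * documents it: the 8 KB map covers 0x00000000..0x00001fff at offset 0x000,
 * 0xc0000000..0xc0001fff at offset 0x800 and 0xc0010000..0xc0011fff at offset
 * 0x1000, with two adjacent bits per MSR (read first, then write).  It mirrors
 * the rule, not the actual CPUM code; the helper name is made up.
 */
# if 0 /* sketch only */
static bool sketchMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *poffByte, uint8_t *piBit)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0x000;
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBase = 0x800;
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        offBase = 0x1000;
    else
        return false;                               /* Not covered -> unconditional #VMEXIT above. */

    uint32_t const iBitAbs = (idMsr & UINT32_C(0x1fff)) * 2;    /* Read bit; write bit is +1.   */
    *poffByte = (uint16_t)(offBase + iBitAbs / 8);
    *piBit    = (uint8_t)(iBitAbs & 7);                         /* Always 0, 2, 4 or 6.         */
    return true;
}
# endif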
1141
1142
1143/**
1144 * Implements 'VMRUN'.
1145 */
1146IEM_CIMPL_DEF_0(iemCImpl_vmrun)
1147{
1148# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1149 RT_NOREF2(pVCpu, cbInstr);
1150 return VINF_EM_RAW_EMULATE_INSTR;
1151# else
1152 LogFlow(("iemCImpl_vmrun\n"));
1153 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
1154
1155 /** @todo Check effective address size using address size prefix. */
1156 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1157 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1158 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1159 {
1160 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1161 return iemRaiseGeneralProtectionFault0(pVCpu);
1162 }
1163
1164 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
1165 {
1166 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
1167 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1168 }
1169
1170 VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, cbInstr, GCPhysVmcb);
1171 if (rcStrict == VERR_SVM_VMEXIT_FAILED)
1172 {
1173 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1174 rcStrict = VINF_EM_TRIPLE_FAULT;
1175 }
1176 return rcStrict;
1177# endif
1178}
1179
1180
1181/**
1182 * Interface for HM and EM to emulate the VMRUN instruction.
1183 *
1184 * @returns Strict VBox status code.
1185 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1186 * @param cbInstr The instruction length in bytes.
1187 * @thread EMT(pVCpu)
1188 */
1189VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
1190{
1191 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1192 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
1193
1194 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1195 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
1196 Assert(!pVCpu->iem.s.cActiveMappings);
1197 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1198}
1199
1200
1201/**
1202 * Implements 'VMLOAD'.
1203 */
1204IEM_CIMPL_DEF_0(iemCImpl_vmload)
1205{
1206# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1207 RT_NOREF2(pVCpu, cbInstr);
1208 return VINF_EM_RAW_EMULATE_INSTR;
1209# else
1210 LogFlow(("iemCImpl_vmload\n"));
1211 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
1212
1213 /** @todo Check effective address size using address size prefix. */
1214 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1215 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1216 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1217 {
1218 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1219 return iemRaiseGeneralProtectionFault0(pVCpu);
1220 }
1221
1222 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
1223 {
1224 Log(("vmload: Guest intercept -> #VMEXIT\n"));
1225 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1226 }
1227
1228 SVMVMCBSTATESAVE VmcbNstGst;
1229 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1230 sizeof(SVMVMCBSTATESAVE));
1231 if (rcStrict == VINF_SUCCESS)
1232 {
1233 LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1234 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1235 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1236 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1237 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1238
1239 pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
1240 pVCpu->cpum.GstCtx.msrSTAR = VmcbNstGst.u64STAR;
1241 pVCpu->cpum.GstCtx.msrLSTAR = VmcbNstGst.u64LSTAR;
1242 pVCpu->cpum.GstCtx.msrCSTAR = VmcbNstGst.u64CSTAR;
1243 pVCpu->cpum.GstCtx.msrSFMASK = VmcbNstGst.u64SFMASK;
1244
1245 pVCpu->cpum.GstCtx.SysEnter.cs = VmcbNstGst.u64SysEnterCS;
1246 pVCpu->cpum.GstCtx.SysEnter.esp = VmcbNstGst.u64SysEnterESP;
1247 pVCpu->cpum.GstCtx.SysEnter.eip = VmcbNstGst.u64SysEnterEIP;
1248
1249 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1250 }
1251 return rcStrict;
1252# endif
1253}
1254
1255
1256/**
1257 * Interface for HM and EM to emulate the VMLOAD instruction.
1258 *
1259 * @returns Strict VBox status code.
1260 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1261 * @param cbInstr The instruction length in bytes.
1262 * @thread EMT(pVCpu)
1263 */
1264VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
1265{
1266 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1267
1268 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1269 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
1270 Assert(!pVCpu->iem.s.cActiveMappings);
1271 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1272}
1273
1274
1275/**
1276 * Implements 'VMSAVE'.
1277 */
1278IEM_CIMPL_DEF_0(iemCImpl_vmsave)
1279{
1280# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1281 RT_NOREF2(pVCpu, cbInstr);
1282 return VINF_EM_RAW_EMULATE_INSTR;
1283# else
1284 LogFlow(("iemCImpl_vmsave\n"));
1285 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
1286
1287 /** @todo Check effective address size using address size prefix. */
1288 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1289 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1290 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1291 {
1292 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1293 return iemRaiseGeneralProtectionFault0(pVCpu);
1294 }
1295
1296 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
1297 {
1298 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
1299 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1300 }
1301
1302 SVMVMCBSTATESAVE VmcbNstGst;
1303 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1304 sizeof(SVMVMCBSTATESAVE));
1305 if (rcStrict == VINF_SUCCESS)
1306 {
1307 LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1308 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
1309 | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
1310
1311 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1312 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1313 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1314 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1315
1316 VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
1317 VmcbNstGst.u64STAR = pVCpu->cpum.GstCtx.msrSTAR;
1318 VmcbNstGst.u64LSTAR = pVCpu->cpum.GstCtx.msrLSTAR;
1319 VmcbNstGst.u64CSTAR = pVCpu->cpum.GstCtx.msrCSTAR;
1320 VmcbNstGst.u64SFMASK = pVCpu->cpum.GstCtx.msrSFMASK;
1321
1322 VmcbNstGst.u64SysEnterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1323 VmcbNstGst.u64SysEnterESP = pVCpu->cpum.GstCtx.SysEnter.esp;
1324 VmcbNstGst.u64SysEnterEIP = pVCpu->cpum.GstCtx.SysEnter.eip;
1325
1326 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
1327 sizeof(SVMVMCBSTATESAVE));
1328 if (rcStrict == VINF_SUCCESS)
1329 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1330 }
1331 return rcStrict;
1332# endif
1333}
1334
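/*
 * Editor's note: illustrative sketch, not part of the original source.  It
 * shows how a hypervisor typically uses VMSAVE/VMLOAD around VMRUN from its
 * own code: all three take the VMCB physical address in RAX, and VMSAVE/
 * VMLOAD transfer exactly the state handled above (FS/GS/TR/LDTR,
 * KERNEL_GS_BASE, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs).  A
 * GCC-style toolchain whose assembler knows the SVM mnemonics is assumed.
 */
# if 0 /* sketch only */
static void sketchSaveRestoreAroundVmrun(uint64_t HCPhysHostVmcb, uint64_t HCPhysGuestVmcb)
{
    __asm__ __volatile__("vmsave %%rax" : : "a" (HCPhysHostVmcb)  : "memory");        /* Stash host extra state.   */
    __asm__ __volatile__("vmload %%rax" : : "a" (HCPhysGuestVmcb) : "memory");        /* Load guest extra state.   */
    __asm__ __volatile__("vmrun  %%rax" : : "a" (HCPhysGuestVmcb) : "memory", "cc");  /* Run until #VMEXIT.        */
    __asm__ __volatile__("vmsave %%rax" : : "a" (HCPhysGuestVmcb) : "memory");        /* Stash guest extra state.  */
    __asm__ __volatile__("vmload %%rax" : : "a" (HCPhysHostVmcb)  : "memory");        /* Restore host extra state. */
}
# endif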
1335
1336/**
1337 * Interface for HM and EM to emulate the VMSAVE instruction.
1338 *
1339 * @returns Strict VBox status code.
1340 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1341 * @param cbInstr The instruction length in bytes.
1342 * @thread EMT(pVCpu)
1343 */
1344VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
1345{
1346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1347
1348 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1349 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
1350 Assert(!pVCpu->iem.s.cActiveMappings);
1351 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1352}
1353
1354
1355/**
1356 * Implements 'CLGI'.
1357 */
1358IEM_CIMPL_DEF_0(iemCImpl_clgi)
1359{
1360# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1361 RT_NOREF2(pVCpu, cbInstr);
1362 return VINF_EM_RAW_EMULATE_INSTR;
1363# else
1364 LogFlow(("iemCImpl_clgi\n"));
1365 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
1366 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
1367 {
1368 Log(("clgi: Guest intercept -> #VMEXIT\n"));
1369 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1370 }
1371
1372 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);
1373 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1374
1375# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1376 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1377# else
1378 return VINF_SUCCESS;
1379# endif
1380# endif
1381}
1382
1383
1384/**
1385 * Interface for HM and EM to emulate the CLGI instruction.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1389 * @param cbInstr The instruction length in bytes.
1390 * @thread EMT(pVCpu)
1391 */
1392VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1393{
1394 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1395
1396 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1397 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
1398 Assert(!pVCpu->iem.s.cActiveMappings);
1399 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1400}
1401
1402
1403/**
1404 * Implements 'STGI'.
1405 */
1406IEM_CIMPL_DEF_0(iemCImpl_stgi)
1407{
1408# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1409 RT_NOREF2(pVCpu, cbInstr);
1410 return VINF_EM_RAW_EMULATE_INSTR;
1411# else
1412 LogFlow(("iemCImpl_stgi\n"));
1413 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
1414 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
1415 {
1416 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
1417 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1418 }
1419
1420 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);
1421 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1422
1423# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1424 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1425# else
1426 return VINF_SUCCESS;
1427# endif
1428# endif
1429}
1430
1431
1432/**
1433 * Interface for HM and EM to emulate the STGI instruction.
1434 *
1435 * @returns Strict VBox status code.
1436 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1437 * @param cbInstr The instruction length in bytes.
1438 * @thread EMT(pVCpu)
1439 */
1440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1441{
1442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1443
1444 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1445 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
1446 Assert(!pVCpu->iem.s.cActiveMappings);
1447 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1448}
1449
1450
1451/**
1452 * Implements 'INVLPGA'.
1453 */
1454IEM_CIMPL_DEF_0(iemCImpl_invlpga)
1455{
1456 /** @todo Check effective address size using address size prefix. */
1457 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1458 /** @todo PGM needs virtual ASID support. */
1459# if 0
1460 uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
1461# endif
1462
1463 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
1464 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
1465 {
1466 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
1467 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1468 }
1469
1470 PGMInvalidatePage(pVCpu, GCPtrPage);
1471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1472 return VINF_SUCCESS;
1473}
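
/*
 * Guest-side contract sketch (illustration only): INVLPGA takes the linear
 * address to invalidate implicitly in rAX/EAX (width per the effective address
 * size, see the todo above) and the ASID in ECX.  Since PGM has no virtual-ASID
 * support yet, the emulation above simply flushes the page for the current
 * context and ignores ECX.
 *
 *   ; guest pseudo-assembly
 *   mov rax, qword [pvPage]   ; linear address of the page to invalidate
 *   mov ecx, uGuestAsid       ; ASID (currently ignored by this emulation)
 *   invlpga                   ; 0F 01 DF
 */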
1474
1475
1476/**
1477 * Interface for HM and EM to emulate the INVLPGA instruction.
1478 *
1479 * @returns Strict VBox status code.
1480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1481 * @param cbInstr The instruction length in bytes.
1482 * @thread EMT(pVCpu)
1483 */
1484VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
1485{
1486 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1487
1488 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
1490 Assert(!pVCpu->iem.s.cActiveMappings);
1491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1492}
1493
1494
1495/**
1496 * Implements 'SKINIT'.
1497 */
1498IEM_CIMPL_DEF_0(iemCImpl_skinit)
1499{
1500 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
1501
1502 uint32_t uIgnore;
1503 uint32_t fFeaturesECX;
1504 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
1505 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
1506 return iemRaiseUndefinedOpcode(pVCpu);
1507
1508 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
1509 {
1510 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
1511 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1512 }
1513
1514 RT_NOREF(cbInstr);
1515 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
1516}
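
/*
 * Note and sketch: SKINIT is advertised via CPUID leaf 0x80000001 ECX bit 12
 * (X86_CPUID_AMD_FEATURE_ECX_SKINIT), which is exactly what the check above
 * queries.  The instruction itself -- the secure loader hand-off with EAX
 * pointing at the secure loader block (SLB) -- is not implemented and yields
 * VERR_IEM_INSTR_NOT_IMPLEMENTED.  A guest-side capability probe could look
 * like this (illustrative only):
 *
 *   uint32_t uEax, uEbx, uEcx, uEdx;
 *   ASMCpuId(0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
 *   bool const fHasSkinit = RT_BOOL(uEcx & X86_CPUID_AMD_FEATURE_ECX_SKINIT);
 */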
1517
1518
1519/**
1520 * Implements the SVM variant of 'PAUSE' (pause-filter and intercept handling).
1521 */
1522IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
1523{
1524 bool fCheckIntercept = true;
1525 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
1526 {
1527 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1528
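/*
 * Pause-filter overview (descriptive note): the VMCB lets the outer hypervisor
 * rate-limit PAUSE intercepts.  Each PAUSE decrements a counter and is only
 * intercepted once the counter reaches zero; if the pause-filter threshold
 * feature is present, a gap between successive PAUSEs larger than the threshold
 * (in TSC ticks) re-arms the counter, so only tight spin loops end up causing
 * the #VMEXIT.
 */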
1529 /* TSC based pause-filter thresholding. */
1530 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
1531 && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
1532 {
1533 uint64_t const uTick = TMCpuTickGet(pVCpu);
1534 if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
1535 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
1536 pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
1537 }
1538
1539 /* Simple pause-filter counter. */
1540 if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
1541 {
1542 --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
1543 fCheckIntercept = false;
1544 }
1545 }
1546
1547 if (fCheckIntercept)
1548 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
1549
1550 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1551 return VINF_SUCCESS;
1552}
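
/*
 * Configuration sketch (hedged): the counter and threshold consumed above are
 * programmed by the outer hypervisor in the pause-filter fields of the VMCB
 * control area before VMRUN.  The field names below follow the SVMVMCBCTRL
 * layout in VBox/vmm/hm_svm.h from memory and should be treated as an
 * assumption, not a definitive reference.
 *
 *   static void svmExampleSetupPauseFilter(PSVMVMCB pVmcb)
 *   {
 *       pVmcb->ctrl.u16PauseFilterCount     = 3000;    // PAUSEs tolerated before intercepting
 *       pVmcb->ctrl.u16PauseFilterThreshold = 0x8000;  // TSC-tick gap that re-arms the count
 *   }
 */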
1553
1554#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
1555
1556/**
1557 * Common code for iemCImpl_vmmcall and iemCImpl_vmcall (latter in IEMAllCImplVmxInstr.cpp.h).
1558 */
1559IEM_CIMPL_DEF_1(iemCImpl_Hypercall, uint16_t, uDisOpcode)
1560{
1561 if (EMAreHypercallInstructionsEnabled(pVCpu))
1562 {
1563 NOREF(uDisOpcode);
1564 VBOXSTRICTRC rcStrict = GIMHypercallEx(pVCpu, IEM_GET_CTX(pVCpu), uDisOpcode, cbInstr);
1565 if (RT_SUCCESS(rcStrict))
1566 {
1567 if (rcStrict == VINF_SUCCESS)
1568 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1569 if ( rcStrict == VINF_SUCCESS
1570 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
1571 return VINF_SUCCESS;
1572 AssertMsgReturn(rcStrict == VINF_GIM_R3_HYPERCALL, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1573 return rcStrict;
1574 }
1575 AssertMsgReturn( rcStrict == VERR_GIM_HYPERCALL_ACCESS_DENIED
1576 || rcStrict == VERR_GIM_HYPERCALLS_NOT_AVAILABLE
1577 || rcStrict == VERR_GIM_NOT_ENABLED
1578 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_READ_FAILED
1579 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED,
1580 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1581
1582 /* Raise #UD on all failures. */
1583 }
1584 return iemRaiseUndefinedOpcode(pVCpu);
1585}
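
/*
 * Status-code contract (descriptive note, derived from the code above): a GIM
 * provider that completes the hypercall returns VINF_SUCCESS (the instruction
 * is retired and RIP advanced here) or VINF_GIM_HYPERCALL_CONTINUING (treated
 * as success without advancing RIP); VINF_GIM_R3_HYPERCALL is passed up so the
 * caller can finish the hypercall in ring-3.  Every recognised failure status
 * is turned into a #UD for the guest, mirroring the behaviour of an unhandled
 * VMMCALL/VMCALL.
 */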
1586
1587
1588/**
1589 * Implements 'VMMCALL'.
1590 */
1591IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
1592{
1593 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
1594 {
1595 Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
1596 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1597 }
1598
1599 /* This is a little bit more complicated than the VT-x version because HM/SVM may
1600 patch MOV CR8 instructions to speed up APIC.TPR access for 32-bit Windows guests. */
1601 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1602 if (VM_IS_HM_ENABLED(pVM))
1603 {
1604 int rc = HMHCMaybeMovTprSvmHypercall(pVM, pVCpu);
1605 if (RT_SUCCESS(rc))
1606 {
1607 Log(("vmmcall: MovTpr\n"));
1608 return VINF_SUCCESS;
1609 }
1610 }
1611
1612 /* Join forces with vmcall. */
1613 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMMCALL);
1614}
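
/*
 * Guest-side sketch (illustration only): which registers carry the hypercall
 * number and parameters depends on the active GIM provider (Hyper-V, KVM, ...),
 * so only the raw instruction is shown; everything else is provider ABI.
 *
 *   ; guest pseudo-assembly
 *   ; ... load provider-specific hypercall registers ...
 *   vmmcall             ; 0F 01 D9, intercepted or routed to GIM as above
 */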
1615