VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp@19420

Last change on this file since 19420 was 19417, checked in by vboxsync, 15 years ago:

Also check VMCPU_FF_REQUEST
/* $Id: HWSVMR0.cpp 19417 2009-05-06 09:40:37Z vboxsync $ */
/** @file
 * HWACCM SVM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/disopcode.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int SVMR0InterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/* IO operation lookup arrays. */
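/* Indexed with the operand-size encoding taken from an IOIO intercept's exit
 * information: indices 0, 1 and 3 correspond to 1, 2 and 4 byte accesses;
 * index 2 is unused, hence the 0. */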
static uint32_t const g_aIOSize[4] = {1, 2, 0, 4};

/**
 * Sets up and activates AMD-V on the current CPU
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pVM             The VM to operate on. (can be NULL after a resume!!)
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* We must turn on AMD-V and set up the host state physical address, as those MSRs are per-cpu/core. */

#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
    SUPR0Printf("SVMR0EnableCpu cpu %d page (%x) %x\n", pCpu->idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
#endif

    /* Turn on AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    if (!(val & MSR_K6_EFER_SVME))
        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);

    return VINF_SUCCESS;
}

/**
 * Deactivates AMD-V on the current CPU
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
    SUPR0Printf("SVMR0DisableCpu cpu %d\n", pCpu->idCpu);
#endif

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc;

    pVM->hwaccm.s.svm.pMemObjVMCBHost  = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.svm.pMemObjIOBitmap  = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;

    /* Allocate one page for the host context */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCBHost);
    pVM->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCBHost, 0);
    ASMMemZeroPage(pVM->hwaccm.s.svm.pVMCBHost);

    /* Allocate 12 KB for the IO bitmap (there seems to be no way to convince SVM not to use it) */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);

    /* Allocate 8 KB for the MSR bitmap (there seems to be no way to convince SVM not to use it) */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjMSRBitmap);
    pVM->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjMSRBitmap, 0);
    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);
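
    /* A sketch (not built) of how a single MSR intercept could later be
     * relaxed in the bitmap filled above. The layout follows the MSRPM
     * description in the AMD APM vol. 2: two bits per MSR (read intercept,
     * then write intercept), with MSRs 00000000h-00001fffh at byte offset
     * 0x000, c0000000h-c0001fffh at offset 0x800 and c0010000h-c0011fffh at
     * offset 0x1000. The MSR chosen and the placement are illustrative only.
     */
#if 0
    uint8_t *pbMSRBitmap = (uint8_t *)pVM->hwaccm.s.svm.pMSRBitmap;
    uint32_t uMsr        = 0xc0000102;  /* MSR_K8_KERNEL_GS_BASE, purely as an example */
    uint32_t ulBit       = 0x800 * 8 + (uMsr - UINT32_C(0xc0000000)) * 2; /* the c0000000h range lives at byte offset 0x800 */
    ASMBitClear(pbMSRBitmap, ulBit);        /* bit 0 of the pair: stop intercepting reads */
    ASMBitSet(pbMSRBitmap, ulBit + 1);      /* bit 1 of the pair: keep intercepting writes */
#endif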

    /* Erratum 170 which requires a forced TLB flush for each world switch:
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     *
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
    if (    u32Family == 0xf
        &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
        pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
    }
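    /* Worked example of the decoding above: CPUID(1).EAX = 0x00060ff2 gives
     * base family 0xf (so the extended fields apply), extended family 0,
     * extended model 0x6 and base model 0xf, i.e. family 0xf, model 0x6f,
     * stepping 2 -- a revision listed in the table above as fixed, so no
     * forced TLB flush is needed for it. */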

    /* Allocate VMCBs for all guest CPUs. */
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;

        /* Allocate one page for the VM control block (VMCB). */
        rc = RTR0MemObjAllocCont(&pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
        if (RT_FAILURE(rc))
            return rc;

        pVM->aCpus[i].hwaccm.s.svm.pVMCB     = RTR0MemObjAddress(pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB);
        pVM->aCpus[i].hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB, 0);
        ASMMemZeroPage(pVM->aCpus[i].hwaccm.s.svm.pVMCB);
    }

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        if (pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB, false);
            pVM->aCpus[i].hwaccm.s.svm.pVMCB       = 0;
            pVM->aCpus[i].hwaccm.s.svm.pVMCBPhys   = 0;
            pVM->aCpus[i].hwaccm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;
        }
    }
    if (pVM->hwaccm.s.svm.pMemObjVMCBHost != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCBHost, false);
        pVM->hwaccm.s.svm.pVMCBHost       = 0;
        pVM->hwaccm.s.svm.pVMCBHostPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjVMCBHost = NIL_RTR0MEMOBJ;
    }
    if (pVM->hwaccm.s.svm.pMemObjIOBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
        pVM->hwaccm.s.svm.pIOBitmap       = 0;
        pVM->hwaccm.s.svm.pIOBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
    }
    if (pVM->hwaccm.s.svm.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjMSRBitmap, false);
        pVM->hwaccm.s.svm.pMSRBitmap       = 0;
        pVM->hwaccm.s.svm.pMSRBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    }
    return VINF_SUCCESS;
}

/**
 * Sets up AMD-V for the specified VM
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    SVM_VMCB *pVMCB;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    Assert(pVM->hwaccm.s.svm.fSupported);

    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        pVMCB = (SVM_VMCB *)pVM->aCpus[i].hwaccm.s.svm.pVMCB;
        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

        /* Program the control fields. Most of them never have to be changed again. */
        /* CR0/3/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
        /* Note: CR0 & CR4 can be safely read when guest and shadow copies are identical. */
        if (!pVM->hwaccm.s.fNestedPaging)
            pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4);
        else
            pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

        /*
         * CR0/3/4 writes must be intercepted for obvious reasons.
         */
        if (!pVM->hwaccm.s.fNestedPaging)
            pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4);
        else
            pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4) | RT_BIT(8);

        /* Intercept all DRx reads and writes by default. Changed later on. */
        pVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;
        pVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;

        /* Currently we don't care about DRx reads or writes. DRx registers are trashed.
         * All breakpoints are automatically cleared when the VM exits.
         */

        pVMCB->ctrl.u32InterceptException = HWACCM_SVM_TRAP_MASK;
#ifndef DEBUG
        if (pVM->hwaccm.s.fNestedPaging)
            pVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_PF); /* no longer need to intercept #PF. */
#endif

        pVMCB->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR
                                      | SVM_CTRL1_INTERCEPT_VINTR
                                      | SVM_CTRL1_INTERCEPT_NMI
                                      | SVM_CTRL1_INTERCEPT_SMI
                                      | SVM_CTRL1_INTERCEPT_INIT
                                      | SVM_CTRL1_INTERCEPT_RDPMC
                                      | SVM_CTRL1_INTERCEPT_CPUID
                                      | SVM_CTRL1_INTERCEPT_RSM
                                      | SVM_CTRL1_INTERCEPT_HLT
                                      | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
                                      | SVM_CTRL1_INTERCEPT_MSR_SHADOW
                                      | SVM_CTRL1_INTERCEPT_INVLPG
                                      | SVM_CTRL1_INTERCEPT_INVLPGA      /* AMD only */
                                      | SVM_CTRL1_INTERCEPT_TASK_SWITCH
                                      | SVM_CTRL1_INTERCEPT_SHUTDOWN     /* fatal */
                                      | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Legacy FPU FERR handling. */
        /* With nested paging we don't care about invlpg anymore. */
        if (pVM->hwaccm.s.fNestedPaging)
            pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_INVLPG;

        pVMCB->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                      | SVM_CTRL2_INTERCEPT_VMMCALL
                                      | SVM_CTRL2_INTERCEPT_VMLOAD
                                      | SVM_CTRL2_INTERCEPT_VMSAVE
                                      | SVM_CTRL2_INTERCEPT_STGI
                                      | SVM_CTRL2_INTERCEPT_CLGI
                                      | SVM_CTRL2_INTERCEPT_SKINIT
                                      | SVM_CTRL2_INTERCEPT_WBINVD
                                      | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
        Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
        Log(("pVMCB->ctrl.u32InterceptCtrl1 = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
        Log(("pVMCB->ctrl.u32InterceptCtrl2 = %x\n", pVMCB->ctrl.u32InterceptCtrl2));

        /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
        pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;
        /* Ignore the priority in the TPR; just deliver it when we tell it to. */
        pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR   = 1;

        /* Set IO and MSR bitmap addresses. */
        pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hwaccm.s.svm.pIOBitmapPhys;
        pVMCB->ctrl.u64MSRPMPhysAddr = pVM->hwaccm.s.svm.pMSRBitmapPhys;

        /* No LBR virtualization. */
        pVMCB->ctrl.u64LBRVirt = 0;

        /** The ASID must start at 1; the host uses 0. */
        pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;

        /** Setup the PAT msr (nested paging only) */
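        /* Each byte of this value is one PAT entry (0x06 = write-back,
         * 0x04 = write-through, 0x07 = UC-, 0x00 = uncacheable); it is the
         * architectural power-on default PAT value. */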
        pVMCB->guest.u64GPAT = 0x0007040600070406ULL;
    }
    return rc;
}


/**
 * Injects an event (trap or external interrupt)
 *
 * @param   pVM         The VM to operate on.
 * @param   pVMCB       SVM control block
 * @param   pCtx        CPU Context
 * @param   pEvent      SVM event (trap or interrupt) to inject
 */
inline void SVMR0InjectEvent(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT* pEvent)
{
#ifdef VBOX_STRICT
    if (pEvent->n.u8Vector == 0xE)
        Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
    else
    if (pEvent->n.u8Vector < 0x20)
        Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
    else
    {
        Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
        Assert(!VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set event injection state. */
    pVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
}
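
/* For reference, the EVENTINJ encoding written above (per the AMD APM vol. 2):
 * bits 7:0 = vector, bits 10:8 = type (0 = external interrupt, 2 = NMI,
 * 3 = exception, 4 = software interrupt), bit 11 = error code valid,
 * bit 31 = valid, bits 63:32 = error code. A minimal sketch (not built) of
 * injecting a \#GP with error code 0 through this helper:
 */
#if 0
    SVM_EVENT Event;
    Event.au64[0]            = 0;                   /* clear all fields first */
    Event.n.u8Vector         = X86_XCPT_GP;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = 0;
    Event.n.u1Valid          = 1;
    SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
#endif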


/**
 * Checks for pending guest interrupts and injects them
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VM CPU to operate on.
 * @param   pVMCB       SVM control block
 * @param   pCtx        CPU Context
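 * @remarks Events are considered in priority order: an event left pending by
 *          a premature \#VMEXIT is reinjected first, then a queued NMI, and
 *          only then APIC/PIC interrupts (subject to EFLAGS.IF and the
 *          interrupt shadow).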
 */
static int SVMR0CheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVCpu->hwaccm.s.Event.fPending)
    {
        SVM_EVENT Event;

        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        pVCpu->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    if (pVM->hwaccm.s.fInjectNMI)
    {
        SVM_EVENT Event;

        Event.n.u8Vector     = X86_XCPT_NMI;
        Event.n.u1Valid      = 1;
        Event.n.u32ErrorCode = 0;
        Event.n.u3Type       = SVM_EVENT_NMI;

        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
        pVM->hwaccm.s.fInjectNMI = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVCpu)
        &&  VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    {
        if (    !(pCtx->eflags.u32 & X86_EFL_IF)
            ||  VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        {
            if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
            {
                if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
                    LogFlow(("Enable irq window exit!\n"));
                else
                    Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", (RTGCPTR)pCtx->rip));

                /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
                pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
                pVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 1;
                pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
            }
        }
        else
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
            if (RT_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
                Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVCpu))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        &&  TRPMHasTrap(pVCpu)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        SVM_EVENT   Event;
        RTGCUINT    u32ErrorCode;

        Event.au64[0] = 0;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);

        Event.n.u8Vector     = u8Vector;
        Event.n.u1Valid      = 1;
        Event.n.u32ErrorCode = u32ErrorCode;

        if (enmType == TRPM_TRAP)
        {
            switch (u8Vector) {
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                /* Valid error codes. */
                Event.n.u1ErrorCodeValid = 1;
                break;
            default:
                break;
            }
            if (u8Vector == X86_XCPT_NMI)
                Event.n.u3Type = SVM_EVENT_NMI;
            else
                Event.n.u3Type = SVM_EVENT_EXCEPTION;
        }
        else
            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntInject);
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}

/**
 * Save the host state
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VM CPU to operate on.
 */
VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
{
    NOREF(pVM);
    NOREF(pVCpu);
    /* Nothing to do here. */
    return VINF_SUCCESS;
}

/**
 * Loads the guest state
 *
 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VM CPU to operate on.
 * @param   pCtx        Guest context
 */
VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    RTGCUINTPTR val;
    SVM_VMCB *pVMCB;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Setup AMD SVM. */
    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
    }

    /* Guest CPU context: LDTR. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        SVM_WRITE_SELREG(LDTR, ldtr);
    }

    /* Guest CPU context: TR. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        SVM_WRITE_SELREG(TR, tr);
    }

    /* Guest CPU context: GDTR. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVMCB->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    }

    /* Guest CPU context: IDTR. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVMCB->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    }

    /*
     * Sysenter MSRs (unconditional)
     */
    pVMCB->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
    pVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
    pVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;

    /* Control registers */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        if (!CPUMIsGuestFPUStateActive(pVCpu))
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVCpu->hwaccm.s.fFPUOldStyleOverride)
                {
                    pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
                    pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }
            val |= X86_CR0_NE; /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Always enable caching. */
        val &= ~(X86_CR0_CD|X86_CR0_NW);

        /* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. */
        /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            val |= X86_CR0_PG; /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
            val |= X86_CR0_WP; /* Must set this as we rely on write-protecting various pages, and supervisor writes must be caught. */
        }
        pVMCB->guest.u64CR0 = val;
    }
    /* CR2 as well */
    pVMCB->guest.u64CR2 = pCtx->cr2;

    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        if (pVM->hwaccm.s.fNestedPaging)
        {
            PGMMODE enmShwPagingMode;

#if HC_ARCH_BITS == 32
            if (CPUMIsGuestInLongModeEx(pCtx))
                enmShwPagingMode = PGMMODE_AMD64_NX;
            else
#endif
                enmShwPagingMode = PGMGetHostMode(pVM);

            pVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
            Assert(pVMCB->ctrl.u64NestedPagingCR3);
            pVMCB->guest.u64CR3            = pCtx->cr3;
        }
        else
        {
            pVMCB->guest.u64CR3            = PGMGetHyperCR3(pVCpu);
            Assert(pVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
        }
    }

    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        val = pCtx->cr4;
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            switch(pVCpu->hwaccm.s.enmShadowMode)
            {
            case PGMMODE_REAL:
            case PGMMODE_PROTECTED:   /* Protected mode, no paging. */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;

            case PGMMODE_32_BIT:      /* 32-bit paging. */
                val &= ~X86_CR4_PAE;
                break;

            case PGMMODE_PAE:         /* PAE paging. */
            case PGMMODE_PAE_NX:      /* PAE paging with NX enabled. */
                /** @todo use normal 32 bits paging */
                val |= X86_CR4_PAE;
                break;

            case PGMMODE_AMD64:       /* 64-bit AMD paging (long mode). */
            case PGMMODE_AMD64_NX:    /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                break;
#else
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif

            default:                  /* shut up gcc */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
            }
        }
        pVMCB->guest.u64CR4 = val;
    }

    /* Debug registers. */
    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                /* set all reserved bits to 1. */
        pCtx->dr[6] &= ~RT_BIT(12);                                     /* must be zero. */

        pCtx->dr[7] &= 0xffffffff;                                      /* upper 32 bits reserved */
        pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
        pCtx->dr[7] |= 0x400;                                           /* must be one */

        pVMCB->guest.u64DR7 = pCtx->dr[7];
        pVMCB->guest.u64DR6 = pCtx->dr[6];

        /* Sync the debug state now if any breakpoint is armed. */
        if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
            &&  !CPUMIsGuestDebugStateActive(pVCpu)
            &&  !DBGFIsStepping(pVCpu))
        {
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);

            /* Disable drx move intercepts. */
            pVMCB->ctrl.u16InterceptRdDRx = 0;
            pVMCB->ctrl.u16InterceptWrDRx = 0;

            /* Save the host and load the guest debug state. */
            int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
            AssertRC(rc);
        }
    }

    /* EIP, ESP and EFLAGS */
    pVMCB->guest.u64RIP    = pCtx->rip;
    pVMCB->guest.u64RSP    = pCtx->rsp;
    pVMCB->guest.u64RFlags = pCtx->eflags.u32;

    /* Set CPL */
    pVMCB->guest.u8CPL     = pCtx->csHid.Attr.n.u2Dpl;
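    /* Note: architecturally the CPL is SS.DPL; taking CS.DPL here relies on
     * the two DPLs agreeing for the guest states that reach this path. */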

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
    pVMCB->guest.u64RAX    = pCtx->rax;

    /* vmrun will fail without MSR_K6_EFER_SVME. */
    pVMCB->guest.u64EFER   = pCtx->msrEFER | MSR_K6_EFER_SVME;

    /* 64-bit guest mode? */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    {
#if !defined(VBOX_ENABLE_64_BITS_GUESTS)
        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
#else
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        if (!pVM->hwaccm.s.fAllow64BitGuests)
            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
# endif
        pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun64;
#endif
        /* Unconditionally update these as wrmsr might have changed them. (HWACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
        pVMCB->guest.FS.u64Base    = pCtx->fsHid.u64Base;
        pVMCB->guest.GS.u64Base    = pCtx->gsHid.u64Base;
    }
    else
    {
        /* Filter out the MSR_K6_LME bit or else AMD-V expects amd64 shadow paging. */
        pVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;

        pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun;
    }

    /* TSC offset. */
    if (TMCpuTickCanUseRealTSC(pVCpu, &pVMCB->ctrl.u64TSCOffset))
    {
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
        pVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
    }
    else
    {
        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
    }

    /* Sync the various MSRs for 64-bit mode. */
    pVMCB->guest.u64STAR         = pCtx->msrSTAR;         /* legacy syscall eip, cs & ss */
    pVMCB->guest.u64LSTAR        = pCtx->msrLSTAR;        /* 64-bit mode syscall rip */
    pVMCB->guest.u64CSTAR        = pCtx->msrCSTAR;        /* compatibility mode syscall rip */
    pVMCB->guest.u64SFMASK       = pCtx->msrSFMASK;       /* syscall flag mask */
    pVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; /* swapgs exchange value */

#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (DBGFIsStepping(pVCpu))
        pVMCB->ctrl.u32InterceptException |=  RT_BIT(X86_XCPT_DB);
    else
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_DB);
#endif

    /* Done. */
    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return VINF_SUCCESS;
}


/**
 * Runs guest code in an AMD-V VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VM CPU to operate on.
 * @param   pCtx        Guest context
 */
VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    int             rc = VINF_SUCCESS;
    uint64_t        exitCode = (uint64_t)SVM_EXIT_INVALID;
    SVM_VMCB       *pVMCB;
    bool            fSyncTPR = false;
    unsigned        cResume = 0;
    uint8_t         u8LastVTPR;
    PHWACCM_CPUINFO pCpu = 0;
#ifdef VBOX_STRICT
    RTCPUID         idCpuCheck;
#endif

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatEntry, x);

    pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* We can jump to this point to resume execution after determining that a VM-exit is innocent. */
ResumeExecution:
    Assert(!HWACCMR0SuspendPending());

    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        {
            /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process, which could
             * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
             */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding SVM state. */
            pVMCB->ctrl.u64IntShadow = 0;
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding SVM state. */
        pVMCB->ctrl.u64IntShadow = 0;
    }

    /* Check for pending actions that force us to go back to ring 3. */
#ifdef DEBUG
    /* Skip these checks while single stepping in the debugger. */
    if (!DBGFIsStepping(pVCpu))
#endif
    {
        if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
        {
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
            rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
            goto end;
        }
    }

    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
    if (RT_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        goto end;
    }

    /* TPR caching using CR8 is only available in 64-bit mode. */
    /* Note: AMD also exposes it in 32-bit mode (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears to be missing on Intel CPUs. */
    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    {
        bool fPending;

        /* TPR caching in CR8 */
        int rc = PDMApicGetTPR(pVM, &u8LastVTPR, &fPending);
        AssertRC(rc);
        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastVTPR;

        if (fPending)
        {
            /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
        }
        else
            /* No interrupts are pending, so we don't need to be explicitly notified.
             * There are enough world switches for detecting pending interrupts.
             */
            pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);

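        /* When nothing is pending we write the (possibly guest-modified) V_TPR
         * back to the virtual APIC after the world switch; with an interrupt
         * pending, the CR8 write intercept above picks up TPR changes instead. */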
        fSyncTPR = !fPending;
    }

    /* All done! Let's start VM execution. */
    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatInGC, x);

    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;

#ifdef LOG_ENABLED
    pCpu = HWACCMR0GetCurrentCpu();
    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    {
        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
        else
            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    }
    if (pCpu->fFlushTLB)
        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
#endif

    /*
     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
     *       (until the actual world switch)
     */

#ifdef VBOX_STRICT
    idCpuCheck = RTMpCpuId();
#endif

    /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        goto end;
    }

    pCpu = HWACCMR0GetCurrentCpu();
    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
        /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    {
        /* Force a TLB flush on VM entry. */
        pVCpu->hwaccm.s.fForceTLBFlush = true;
    }
    else
        Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);

    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;

    /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
    if (    pVCpu->hwaccm.s.fForceTLBFlush
        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
    {
        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
            ||  pCpu->fFlushTLB)
        {
            pCpu->fFlushTLB                  = false;
            pCpu->uCurrentASID               = 1; /* start at 1; host uses 0 */
            pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1; /* wrap around; flush TLB */
            pCpu->cTLBFlushes++;
        }
        else
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);

        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
    }
    else
    {
        Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);

        /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;

        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVCpu->hwaccm.s.fForceTLBFlush);
        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVCpu->hwaccm.s.fForceTLBFlush;

        if (    !pVM->hwaccm.s.svm.fAlwaysFlushTLB
            &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
            for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
                SVMR0InvlpgA(pVCpu->hwaccm.s.aTlbShootdownPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
        }
    }
    pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;

#ifdef VBOX_WITH_STATISTICS
    if (pVMCB->ctrl.TLBCtrl.n.u1TLBFlush)
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
    else
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
#endif

    /* In case we execute a goto ResumeExecution later on. */
    pVCpu->hwaccm.s.fResumeVM      = true;
    pVCpu->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;

    Assert(sizeof(pVCpu->hwaccm.s.svm.pVMCBPhys) == 8);
    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hwaccm.s.svm.pIOBitmapPhys);
    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVM->hwaccm.s.svm.pMSRBitmapPhys);
    Assert(pVMCB->ctrl.u64LBRVirt == 0);

#ifdef VBOX_STRICT
    Assert(idCpuCheck == RTMpCpuId());
#endif
    TMNotifyStartOfExecution(pVCpu);
    pVCpu->hwaccm.s.svm.pfnVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);
    TMNotifyEndOfExecution(pVCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatInGC, x);

    /*
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE*
     *            WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit1, x);

    /* Reason for the VM exit */
    exitCode = pVMCB->ctrl.u64ExitCode;

    if (exitCode == (uint64_t)SVM_EXIT_INVALID) /* Invalid guest state. */
    {
        HWACCMDumpRegs(pVM, pVCpu, pCtx);
#ifdef DEBUG
        Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx %x\n", pVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx %x\n", pVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx %x\n", pVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException %x\n", pVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1 %x\n", pVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2 %x\n", pVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr %RX64\n", pVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr %RX64\n", pVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset %RX64\n", pVMCB->ctrl.u64TSCOffset));

        Log(("ctrl.TLBCtrl.u32ASID %x\n", pVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u1TLBFlush %x\n", pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
        Log(("ctrl.TLBCtrl.u7Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u7Reserved));
        Log(("ctrl.TLBCtrl.u24Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u24Reserved));

        Log(("ctrl.IntCtrl.u8VTPR %x\n", pVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u24Reserved));

        Log(("ctrl.u64IntShadow %RX64\n", pVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode %RX64\n", pVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1 %RX64\n", pVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2 %RX64\n", pVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector %x\n", pVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type %x\n", pVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.NestedPaging %RX64\n", pVMCB->ctrl.NestedPaging.au64));
        Log(("ctrl.EventInject.u8Vector %x\n", pVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type %x\n", pVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved %x\n", pVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid %x\n", pVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode %x\n", pVMCB->ctrl.EventInject.n.u32ErrorCode));

        Log(("ctrl.u64NestedPagingCR3 %RX64\n", pVMCB->ctrl.u64NestedPagingCR3));
        Log(("ctrl.u64LBRVirt %RX64\n", pVMCB->ctrl.u64LBRVirt));

        Log(("guest.CS.u16Sel %04X\n", pVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr %04X\n", pVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit %X\n", pVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base %RX64\n", pVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel %04X\n", pVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr %04X\n", pVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit %X\n", pVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base %RX64\n", pVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel %04X\n", pVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr %04X\n", pVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit %X\n", pVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base %RX64\n", pVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel %04X\n", pVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr %04X\n", pVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit %X\n", pVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base %RX64\n", pVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel %04X\n", pVMCB->guest.GS.u16Sel));
        Log(("guest.GS.u16Attr %04X\n", pVMCB->guest.GS.u16Attr));
        Log(("guest.GS.u32Limit %X\n", pVMCB->guest.GS.u32Limit));
        Log(("guest.GS.u64Base %RX64\n", pVMCB->guest.GS.u64Base));

        Log(("guest.GDTR.u32Limit %X\n", pVMCB->guest.GDTR.u32Limit));
        Log(("guest.GDTR.u64Base %RX64\n", pVMCB->guest.GDTR.u64Base));

        Log(("guest.LDTR.u16Sel %04X\n", pVMCB->guest.LDTR.u16Sel));
        Log(("guest.LDTR.u16Attr %04X\n", pVMCB->guest.LDTR.u16Attr));
        Log(("guest.LDTR.u32Limit %X\n", pVMCB->guest.LDTR.u32Limit));
        Log(("guest.LDTR.u64Base %RX64\n", pVMCB->guest.LDTR.u64Base));

        Log(("guest.IDTR.u32Limit %X\n", pVMCB->guest.IDTR.u32Limit));
        Log(("guest.IDTR.u64Base %RX64\n", pVMCB->guest.IDTR.u64Base));

        Log(("guest.TR.u16Sel %04X\n", pVMCB->guest.TR.u16Sel));
        Log(("guest.TR.u16Attr %04X\n", pVMCB->guest.TR.u16Attr));
        Log(("guest.TR.u32Limit %X\n", pVMCB->guest.TR.u32Limit));
        Log(("guest.TR.u64Base %RX64\n", pVMCB->guest.TR.u64Base));

        Log(("guest.u8CPL %X\n", pVMCB->guest.u8CPL));
        Log(("guest.u64CR0 %RX64\n", pVMCB->guest.u64CR0));
        Log(("guest.u64CR2 %RX64\n", pVMCB->guest.u64CR2));
        Log(("guest.u64CR3 %RX64\n", pVMCB->guest.u64CR3));
        Log(("guest.u64CR4 %RX64\n", pVMCB->guest.u64CR4));
        Log(("guest.u64DR6 %RX64\n", pVMCB->guest.u64DR6));
        Log(("guest.u64DR7 %RX64\n", pVMCB->guest.u64DR7));

        Log(("guest.u64RIP %RX64\n", pVMCB->guest.u64RIP));
        Log(("guest.u64RSP %RX64\n", pVMCB->guest.u64RSP));
        Log(("guest.u64RAX %RX64\n", pVMCB->guest.u64RAX));
        Log(("guest.u64RFlags %RX64\n", pVMCB->guest.u64RFlags));

        Log(("guest.u64SysEnterCS %RX64\n", pVMCB->guest.u64SysEnterCS));
        Log(("guest.u64SysEnterEIP %RX64\n", pVMCB->guest.u64SysEnterEIP));
        Log(("guest.u64SysEnterESP %RX64\n", pVMCB->guest.u64SysEnterESP));

        Log(("guest.u64EFER %RX64\n", pVMCB->guest.u64EFER));
        Log(("guest.u64STAR %RX64\n", pVMCB->guest.u64STAR));
        Log(("guest.u64LSTAR %RX64\n", pVMCB->guest.u64LSTAR));
        Log(("guest.u64CSTAR %RX64\n", pVMCB->guest.u64CSTAR));
        Log(("guest.u64SFMASK %RX64\n", pVMCB->guest.u64SFMASK));
        Log(("guest.u64KernelGSBase %RX64\n", pVMCB->guest.u64KernelGSBase));
        Log(("guest.u64GPAT %RX64\n", pVMCB->guest.u64GPAT));
        Log(("guest.u64DBGCTL %RX64\n", pVMCB->guest.u64DBGCTL));
        Log(("guest.u64BR_FROM %RX64\n", pVMCB->guest.u64BR_FROM));
        Log(("guest.u64BR_TO %RX64\n", pVMCB->guest.u64BR_TO));
        Log(("guest.u64LASTEXCPFROM %RX64\n", pVMCB->guest.u64LASTEXCPFROM));
        Log(("guest.u64LASTEXCPTO %RX64\n", pVMCB->guest.u64LASTEXCPTO));

#endif
        rc = VERR_SVM_UNABLE_TO_START_VM;
        goto end;
    }

    /* Let's first sync back eip, esp, and eflags. */
    pCtx->rip        = pVMCB->guest.u64RIP;
    pCtx->rsp        = pVMCB->guest.u64RSP;
    pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
    /* eax is saved/restored across the vmrun instruction */
    pCtx->rax        = pVMCB->guest.u64RAX;

    pCtx->msrKERNELGSBASE = pVMCB->guest.u64KernelGSBase; /* swapgs exchange value */

    /* Can be updated behind our back in the nested paging case. */
    pCtx->cr2        = pVMCB->guest.u64CR2;

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    SVM_READ_SELREG(SS, ss);
    SVM_READ_SELREG(CS, cs);
    SVM_READ_SELREG(DS, ds);
    SVM_READ_SELREG(ES, es);
    SVM_READ_SELREG(FS, fs);
    SVM_READ_SELREG(GS, gs);

    /*
     * System MSRs
     */
    pCtx->SysEnter.cs  = pVMCB->guest.u64SysEnterCS;
    pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP;
    pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP;

    /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR; must sync everything, otherwise we can get out of sync when jumping to ring 3. */
    SVM_READ_SELREG(LDTR, ldtr);
    SVM_READ_SELREG(TR, tr);

    pCtx->gdtr.cbGdt = pVMCB->guest.GDTR.u32Limit;
    pCtx->gdtr.pGdt  = pVMCB->guest.GDTR.u64Base;

    pCtx->idtr.cbIdt = pVMCB->guest.IDTR.u32Limit;
    pCtx->idtr.pIdt  = pVMCB->guest.IDTR.u64Base;

    /* Note: no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */
    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
    if (    pVM->hwaccm.s.fNestedPaging
        &&  pCtx->cr3 != pVMCB->guest.u64CR3)
    {
        CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3);
        PGMUpdateCR3(pVCpu, pVMCB->guest.u64CR3);
    }

    /* Note! NOW IT'S SAFE FOR LOGGING! */

    /* Take care of instruction fusing (sti, mov ss) (see 15.20.5 Interrupt Shadows) */
    if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    {
        Log(("uInterruptState %x rip=%RGv\n", pVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    }
    else
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

    Log2(("exitCode = %x\n", exitCode));

    /* Sync back DR6 as it could have been changed by hitting breakpoints. */
    pCtx->dr[6] = pVMCB->guest.u64DR6;
    /* DR7.GD can be cleared by debug exceptions, so sync it back as well. */
    pCtx->dr[7] = pVMCB->guest.u64DR7;

    /* Check if an injected event was interrupted prematurely. */
    pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
    {
        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));

#ifdef LOG_ENABLED
        SVM_EVENT Event;
        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;

        if (    exitCode == SVM_EXIT_EXCEPTION_E
            &&  Event.n.u8Vector == 0xE)
        {
            Log(("Double fault!\n"));
        }
#endif

        pVCpu->hwaccm.s.Event.fPending = true;
        /* Error code present? (redundant) */
        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
        {
            pVCpu->hwaccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
        }
        else
            pVCpu->hwaccm.s.Event.errCode = 0;
    }
#ifdef VBOX_WITH_STATISTICS
    if (exitCode == SVM_EXIT_NPF)
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
    else
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
#endif

    if (fSyncTPR)
    {
        rc = PDMApicSetTPR(pVM, pVMCB->ctrl.IntCtrl.n.u8VTPR);
        AssertRC(rc);
    }

    /* Deal with the reason of the VM-exit. */
    switch (exitCode)
    {
    case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
    case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
    case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_A:  case SVM_EXIT_EXCEPTION_B:
    case SVM_EXIT_EXCEPTION_C:  case SVM_EXIT_EXCEPTION_D:  case SVM_EXIT_EXCEPTION_E:  case SVM_EXIT_EXCEPTION_F:
    case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
    case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
    case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
    case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
    {
        /* Pending trap. */
        SVM_EVENT   Event;
        uint32_t    vector = exitCode - SVM_EXIT_EXCEPTION_0;

        Log2(("Hardware/software interrupt %d\n", vector));
        switch (vector)
        {
        case X86_XCPT_DB:
        {
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDB);

            /* Note that we don't support guest and host-initiated debugging at the same time. */
            Assert(DBGFIsStepping(pVCpu));

            rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pCtx->dr[6]);
            if (rc == VINF_EM_RAW_GUEST_TRAP)
            {
                Log(("Trap %x (debug) at %016RX64\n", vector, pCtx->rip));

                /* Reinject the exception. */
                Event.au64[0]    = 0;
                Event.n.u3Type   = SVM_EVENT_EXCEPTION; /* trap or fault */
                Event.n.u1Valid  = 1;
                Event.n.u8Vector = X86_XCPT_DB;

                SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
                goto ResumeExecution;
            }
            /* Return to ring 3 to deal with the debug exit code. */
            break;
        }

        case X86_XCPT_NM:
        {
            Log(("#NM fault at %RGv\n", (RTGCPTR)pCtx->rip));

            /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
            /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
            rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
            if (rc == VINF_SUCCESS)
            {
                Assert(CPUMIsGuestFPUStateActive(pVCpu));
                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowNM);

                /* Continue execution. */
                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;

                goto ResumeExecution;
            }

            Log(("Forward #NM fault to the guest\n"));
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);

            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_NM;

            SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
            goto ResumeExecution;
        }

        case X86_XCPT_PF: /* Page fault */
        {
            uint32_t    errCode       = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
            RTGCUINTPTR uFaultAddress = pVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */

#ifdef DEBUG
            if (pVM->hwaccm.s.fNestedPaging)
            {   /* A genuine pagefault.
                 * Forward the trap to the guest by injecting the exception and resuming execution.
                 */
                Log(("Guest page fault at %RGv cr2=%RGv error code %x rsp=%RGv\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);

                /* Now we must update CR2. */
                pCtx->cr2 = uFaultAddress;

                Event.au64[0]            = 0;
                Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                Event.n.u1Valid          = 1;
                Event.n.u8Vector         = X86_XCPT_PF;
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = errCode;

                SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
                goto ResumeExecution;
            }
#endif
            Assert(!pVM->hwaccm.s.fNestedPaging);

            Log2(("Page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
            /* Exit qualification contains the linear address of the page fault. */
            TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
            TRPMSetErrorCode(pVCpu, errCode);
            TRPMSetFaultAddress(pVCpu, uFaultAddress);

            /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
            rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
            Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
            if (rc == VINF_SUCCESS)
            {   /* We've successfully synced our shadow pages, so let's just continue execution. */
                Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);

                TRPMResetTrap(pVCpu);

                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
                goto ResumeExecution;
            }
            else
            if (rc == VINF_EM_RAW_GUEST_TRAP)
            {   /* A genuine pagefault.
                 * Forward the trap to the guest by injecting the exception and resuming execution.
                 */
                Log2(("Forward page fault to the guest\n"));
                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
                /* The error code might have been changed. */
                errCode = TRPMGetErrorCode(pVCpu);

                TRPMResetTrap(pVCpu);

                /* Now we must update CR2. */
                pCtx->cr2 = uFaultAddress;

                Event.au64[0]            = 0;
                Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                Event.n.u1Valid          = 1;
                Event.n.u8Vector         = X86_XCPT_PF;
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = errCode;

                SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
                goto ResumeExecution;
            }
#ifdef VBOX_STRICT
            if (rc != VINF_EM_RAW_EMULATE_INSTR && rc != VINF_EM_RAW_EMULATE_IO_BLOCK)
                LogFlow(("PGMTrap0eHandler failed with %d\n", rc));
#endif
            /* Need to go back to the recompiler to emulate the instruction. */
            TRPMResetTrap(pVCpu);
            break;
        }

        case X86_XCPT_MF: /* Floating point exception. */
        {
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestMF);
            if (!(pCtx->cr0 & X86_CR0_NE))
            {
                /* old style FPU error reporting needs some extra work. */
                /** @todo don't fall back to the recompiler, but do it manually. */
                rc = VINF_EM_RAW_EMULATE_INSTR;
                break;
            }
            Log(("Trap %x at %RGv\n", vector, (RTGCPTR)pCtx->rip));

            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_MF;

            SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
            goto ResumeExecution;
        }

1500#ifdef VBOX_STRICT
1501 case X86_XCPT_GP: /* General protection fault exception. */
1502 case X86_XCPT_UD: /* Invalid opcode exception. */
1503 case X86_XCPT_DE: /* Divide error. */
1504 case X86_XCPT_SS: /* Stack segment exception. */
1505 case X86_XCPT_NP: /* Segment not present exception. */
1506 {
1507 Event.au64[0] = 0;
1508 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1509 Event.n.u1Valid = 1;
1510 Event.n.u8Vector = vector;
1511
1512 switch(vector)
1513 {
1514 case X86_XCPT_GP:
1515 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
1516 Event.n.u1ErrorCodeValid = 1;
1517 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1518 break;
1519 case X86_XCPT_DE:
1520 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
1521 break;
1522 case X86_XCPT_UD:
1523 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
1524 break;
1525 case X86_XCPT_SS:
1526 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
1527 Event.n.u1ErrorCodeValid = 1;
1528 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1529 break;
1530 case X86_XCPT_NP:
1531 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
1532 Event.n.u1ErrorCodeValid = 1;
1533 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1534 break;
1535 }
1536 Log(("Trap %x at %RGv esi=%x\n", vector, (RTGCPTR)pCtx->rip, pCtx->esi));
1537 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1538
1539 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1540 goto ResumeExecution;
1541 }
1542#endif
1543 default:
1544 AssertMsgFailed(("Unexpected VM-exit caused by exception %x\n", vector));
1545 rc = VERR_EM_INTERNAL_ERROR;
1546 break;
1547
1548 } /* switch (vector) */
1549 break;
1550 }
1551
1552 case SVM_EXIT_NPF:
1553 {
1554 /* EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault. */
1555 uint32_t errCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1556 RTGCPHYS uFaultAddress = pVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1557 PGMMODE enmShwPagingMode;
1558
1559 Assert(pVM->hwaccm.s.fNestedPaging);
1560 Log(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
1561 /* EXITINFO2 contains the guest physical address of the fault. */
1562 TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
1563 TRPMSetErrorCode(pVCpu, errCode);
1564 TRPMSetFaultAddress(pVCpu, uFaultAddress);
1565
1566 /* Handle the pagefault trap for the nested shadow table. */
1567#if HC_ARCH_BITS == 32
1568 if (CPUMIsGuestInLongModeEx(pCtx))
1569 enmShwPagingMode = PGMMODE_AMD64_NX;
1570 else
1571#endif
1572 enmShwPagingMode = PGMGetHostMode(pVM);
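 /* Note: a 64-bit guest on a 32-bit host uses AMD64-format nested page
  * tables, so the host paging mode cannot be used as-is in that case. */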
1573
1574 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
1575 Log2(("PGMR0Trap0eHandlerNestedPaging %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
1576 if (rc == VINF_SUCCESS)
1577 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1578 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
1579 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
1580
1581 TRPMResetTrap(pVCpu);
1582
1583 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1584 goto ResumeExecution;
1585 }
1586
1587#ifdef VBOX_STRICT
1588 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1589 LogFlow(("PGMR0Trap0eHandlerNestedPaging failed with %d\n", rc));
1590#endif
1591 /* Need to go back to the recompiler to emulate the instruction. */
1592 TRPMResetTrap(pVCpu);
1593 break;
1594 }
1595
1596 case SVM_EXIT_VINTR:
1597 /* A virtual interrupt is about to be delivered, which means IF=1. */
1598 Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
1599 pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 0;
1600 pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
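 /* The dummy virtual interrupt we raised to force this interrupt-window exit
  * has served its purpose; drop it so the real pending interrupt can be
  * injected on re-entry. */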
1601 goto ResumeExecution;
1602
1603 case SVM_EXIT_FERR_FREEZE:
1604 case SVM_EXIT_INTR:
1605 case SVM_EXIT_NMI:
1606 case SVM_EXIT_SMI:
1607 case SVM_EXIT_INIT:
1608 /* External interrupt or similar host event; leave the guest so the host can dispatch it. */
1609 rc = VINF_EM_RAW_INTERRUPT;
1610 break;
1611
1612 case SVM_EXIT_WBINVD:
1613 case SVM_EXIT_INVD: /* Guest software attempted to execute INVD. */
1614 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvd);
1615 /* Skip instruction and continue directly. */
1616 pCtx->rip += 2; /* Note! Hardcoded opcode size: INVD (0F 08) and WBINVD (0F 09) are both two bytes. */
1617 /* Continue execution.*/
1618 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1619 goto ResumeExecution;
1620
1621 case SVM_EXIT_CPUID: /* Guest software attempted to execute CPUID. */
1622 {
1623 Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax));
1624 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCpuid);
1625 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
1626 if (rc == VINF_SUCCESS)
1627 {
1628 /* Update EIP and continue execution. */
1629 pCtx->rip += 2; /* Note! hardcoded opcode size! */
1630 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1631 goto ResumeExecution;
1632 }
1633 AssertMsgFailed(("EMU: cpuid failed with %Rrc\n", rc));
1634 rc = VINF_EM_RAW_EMULATE_INSTR;
1635 break;
1636 }
1637
1638 case SVM_EXIT_RDTSC: /* Guest software attempted to execute RDTSC. */
1639 {
1640 Log2(("SVM: Rdtsc\n"));
1641 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
1642 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
1643 if (rc == VINF_SUCCESS)
1644 {
1645 /* Update EIP and continue execution. */
1646 pCtx->rip += 2; /* Note! hardcoded opcode size! */
1647 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1648 goto ResumeExecution;
1649 }
1650 rc = VINF_EM_RAW_EMULATE_INSTR;
1651 break;
1652 }
1653
1654 case SVM_EXIT_RDPMC: /* Guest software attempted to execute RDPMC. */
1655 {
1656 Log2(("SVM: Rdpmc %x\n", pCtx->ecx));
1657 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdpmc);
1658 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
1659 if (rc == VINF_SUCCESS)
1660 {
1661 /* Update EIP and continue execution. */
1662 pCtx->rip += 2; /* Note! hardcoded opcode size! */
 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1663 goto ResumeExecution;
1664 }
1665 rc = VINF_EM_RAW_EMULATE_INSTR;
1666 break;
1667 }
1668
1669 case SVM_EXIT_RDTSCP: /* Guest software attempted to execute RDTSCP. */
1670 {
1671 Log2(("SVM: Rdtscp\n"));
1672 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
1673 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
1674 if (rc == VINF_SUCCESS)
1675 {
1676 /* Update EIP and continue execution. */
1677 pCtx->rip += 3; /* Note! hardcoded opcode size! */
1678 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1679 goto ResumeExecution;
1680 }
1681 AssertMsgFailed(("EMU: rdtscp failed with %Rrc\n", rc));
1682 rc = VINF_EM_RAW_EMULATE_INSTR;
1683 break;
1684 }
1685
1686 case SVM_EXIT_INVLPG: /* Guest software attempted to execute INVLPG. */
1687 {
1688 Log2(("SVM: invlpg\n"));
1689 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvpg);
1690
1691 Assert(!pVM->hwaccm.s.fNestedPaging);
1692
1693 /* Truly a pita. Why can't SVM give the same information as VT-x? */
1694 rc = SVMR0InterpretInvpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
1695 if (rc == VINF_SUCCESS)
1696 {
1697 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageInvlpg);
1698 goto ResumeExecution; /* eip already updated */
1699 }
1700 break;
1701 }
1702
1703 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
1704 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
1705 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
1706 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
1707 {
1708 uint32_t cbSize;
1709
1710 Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
1711 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
1712 rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
1713
1714 switch (exitCode - SVM_EXIT_WRITE_CR0)
1715 {
1716 case 0:
1717 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1718 break;
1719 case 2:
1720 break;
1721 case 3:
1722 Assert(!pVM->hwaccm.s.fNestedPaging);
1723 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
1724 break;
1725 case 4:
1726 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
1727 break;
1728 case 8:
1729 break;
1730 default:
1731 AssertFailed();
1732 }
1733 /* Check if a sync operation is pending. */
1734 if ( rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
1735 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
1736 {
1737 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
1738 AssertRC(rc);
1739
1740 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBCRxChange);
1741
1742 /* Must be set by PGMSyncCR3 */
1743 Assert(rc != VINF_SUCCESS || PGMGetGuestMode(pVCpu) <= PGMMODE_PROTECTED || pVCpu->hwaccm.s.fForceTLBFlush);
1744 }
1745 if (rc == VINF_SUCCESS)
1746 {
1747 /* EIP has been updated already. */
1748
1749 /* Only resume if successful. */
1750 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1751 goto ResumeExecution;
1752 }
1753 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1754 break;
1755 }
1756
1757 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
1758 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
1759 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
1760 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
1761 {
1762 uint32_t cbSize;
1763
1764 Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
1765 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
1766 rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
1767 if (rc == VINF_SUCCESS)
1768 {
1769 /* EIP has been updated already. */
1770
1771 /* Only resume if successful. */
1772 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1773 goto ResumeExecution;
1774 }
1775 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1776 break;
1777 }
1778
1779 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
1780 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
1781 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
1782 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
1783 {
1784 uint32_t cbSize;
1785
1786 Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
1787 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
1788
1789 if (!DBGFIsStepping(pVCpu))
1790 {
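 /* Lazy DRx switching: drop the intercepts and load the guest debug state so
  * further DRx accesses run at native speed; SVMR0Leave saves the state back
  * and re-arms the intercepts. */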
1791 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
1792
1793 /* Disable drx move intercepts. */
1794 pVMCB->ctrl.u16InterceptRdDRx = 0;
1795 pVMCB->ctrl.u16InterceptWrDRx = 0;
1796
1797 /* Save the host and load the guest debug state. */
1798 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
1799 AssertRC(rc);
1800
1801 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1802 goto ResumeExecution;
1803 }
1804
1805 rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
1806 if (rc == VINF_SUCCESS)
1807 {
1808 /* EIP has been updated already. */
1809 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
1810
1811 /* Only resume if successful. */
1812 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1813 goto ResumeExecution;
1814 }
1815 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1816 break;
1817 }
1818
1819 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
1820 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
1821 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
1822 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
1823 {
1824 uint32_t cbSize;
1825
1826 Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
1827 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
1828
1829 if (!DBGFIsStepping(pVCpu))
1830 {
1831 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
1832
1833 /* Disable drx move intercepts. */
1834 pVMCB->ctrl.u16InterceptRdDRx = 0;
1835 pVMCB->ctrl.u16InterceptWrDRx = 0;
1836
1837 /* Save the host and load the guest debug state. */
1838 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
1839 AssertRC(rc);
1840
1841 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1842 goto ResumeExecution;
1843 }
1844
1845 rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
1846 if (rc == VINF_SUCCESS)
1847 {
1848 /* EIP has been updated already. */
1849
1850 /* Only resume if successful. */
1851 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1852 goto ResumeExecution;
1853 }
1854 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1855 break;
1856 }
1857
1858 /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
1859 case SVM_EXIT_IOIO: /* I/O instruction. */
1860 {
1861 SVM_IOIO_EXIT IoExitInfo;
1862 uint32_t uIOSize, uAndVal;
1863
1864 IoExitInfo.au32[0] = pVMCB->ctrl.u64ExitInfo1;
1865
1866 /** @todo could use a lookup table here */
1867 if (IoExitInfo.n.u1OP8)
1868 {
1869 uIOSize = 1;
1870 uAndVal = 0xff;
1871 }
1872 else
1873 if (IoExitInfo.n.u1OP16)
1874 {
1875 uIOSize = 2;
1876 uAndVal = 0xffff;
1877 }
1878 else
1879 if (IoExitInfo.n.u1OP32)
1880 {
1881 uIOSize = 4;
1882 uAndVal = 0xffffffff;
1883 }
1884 else
1885 {
1886 AssertFailed(); /* should be fatal. */
1887 rc = VINF_EM_RAW_EMULATE_INSTR;
1888 break;
1889 }
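 /* A table-driven variant, as the @todo above suggests (illustrative sketch
  * only, not enabled): the OP8/OP16/OP32 flags occupy bits 4-6 of EXITINFO1,
  * so the size and mask can be looked up instead of tested flag by flag:
  *
  *   static uint32_t const s_aOpSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 };
  *   unsigned const idx = (IoExitInfo.au32[0] >> 4) & 7;
  *   uIOSize = s_aOpSize[idx];
  *   uAndVal = uIOSize ? 0xffffffffU >> (32 - uIOSize * 8) : 0;
  */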
1890
1891 if (IoExitInfo.n.u1STR)
1892 {
1893 /* ins/outs */
1894 DISCPUSTATE Cpu;
1895
1896 /* Disassemble manually to deal with segment prefixes. */
1897 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu, NULL);
1898 if (rc == VINF_SUCCESS)
1899 {
1900 if (IoExitInfo.n.u1Type == 0)
1901 {
1902 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
1903 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringWrite);
1904 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, Cpu.prefix, uIOSize);
1905 }
1906 else
1907 {
1908 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
1909 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringRead);
1910 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, Cpu.prefix, uIOSize);
1911 }
1912 }
1913 else
1914 rc = VINF_EM_RAW_EMULATE_INSTR;
1915 }
1916 else
1917 {
1918 /* normal in/out */
1919 Assert(!IoExitInfo.n.u1REP);
1920
1921 if (IoExitInfo.n.u1Type == 0)
1922 {
1923 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
1924 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
1925 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
1926 }
1927 else
1928 {
1929 uint32_t u32Val = 0;
1930
1931 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIORead);
1932 rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
1933 if (IOM_SUCCESS(rc))
1934 {
1935 /* Write back to the EAX register. */
1936 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1937 Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
1938 }
1939 }
1940 }
1941 /*
1942 * Handle the I/O return codes.
1943 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1944 */
1945 if (IOM_SUCCESS(rc))
1946 {
1947 /* Update EIP and continue execution. */
1948 pCtx->rip = pVMCB->ctrl.u64ExitInfo2; /* RIP/EIP of the next instruction is saved in EXITINFO2. */
1949 if (RT_LIKELY(rc == VINF_SUCCESS))
1950 {
1951 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
1952 if (pCtx->dr[7] & X86_DR7_ENABLED_MASK)
1953 {
1954 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
1955 for (unsigned i = 0; i < 4; i++)
1956 {
1957 unsigned uBPLen = g_aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)];
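 /* DR7 LEN encoding: 00=1 byte, 01=2 bytes, 11=4 bytes; 10 selects 8 bytes,
  * which is not valid for I/O breakpoints, hence the 0 entry in g_aIOSize. */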
1958
1959 if ( (IoExitInfo.n.u16Port >= pCtx->dr[i] && IoExitInfo.n.u16Port < pCtx->dr[i] + uBPLen)
1960 && (pCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
1961 && (pCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
1962 {
1963 SVM_EVENT Event;
1964
1965 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1966
1967 /* Clear all breakpoint status flags and set the one we just hit. */
1968 pCtx->dr[6] &= ~(X86_DR6_B0|X86_DR6_B1|X86_DR6_B2|X86_DR6_B3);
1969 pCtx->dr[6] |= (uint64_t)RT_BIT(i);
1970
1971 /* Note: AMD64 Architecture Programmer's Manual 13.1:
1972 * Bits 15:13 of the DR6 register are never cleared by the processor and must be cleared by software after
1973 * the contents have been read.
1974 */
1975 pVMCB->guest.u64DR6 = pCtx->dr[6];
1976
1977 /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
1978 pCtx->dr[7] &= ~X86_DR7_GD;
1979
1980 /* Paranoia. */
1981 pCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
1982 pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
1983 pCtx->dr[7] |= 0x400; /* must be one */
1984
1985 pVMCB->guest.u64DR7 = pCtx->dr[7];
1986
1987 /* Inject the exception. */
1988 Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));
1989
1990 Event.au64[0] = 0;
1991 Event.n.u3Type = SVM_EVENT_EXCEPTION; /* trap or fault */
1992 Event.n.u1Valid = 1;
1993 Event.n.u8Vector = X86_XCPT_DB;
1994
1995 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1996
1997 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
1998 goto ResumeExecution;
1999 }
2000 }
2001 }
2002
2003 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
2004 goto ResumeExecution;
2005 }
2006 Log2(("EM status from IO at %RGv %x size %d: %Rrc\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize, rc));
2007 break;
2008 }
2009
2010#ifdef VBOX_STRICT
2011 if (rc == VINF_IOM_HC_IOPORT_READ)
2012 Assert(IoExitInfo.n.u1Type != 0);
2013 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
2014 Assert(IoExitInfo.n.u1Type == 0);
2015 else
2016 AssertMsg(RT_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
2017#endif
2018 Log2(("Failed IO at %RGv %x size %d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
2019 break;
2020 }
2021
2022 case SVM_EXIT_HLT:
2023 /* Check if external interrupts are pending; if so, don't switch back. */
2024 pCtx->rip++; /* Skip HLT; it is a single-byte opcode (0xF4). */
2025 if ( pCtx->eflags.Bits.u1IF
2026 && VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
2027 goto ResumeExecution;
2028
2029 rc = VINF_EM_HALT;
2030 break;
2031
2032 case SVM_EXIT_RSM:
2033 case SVM_EXIT_INVLPGA:
2034 case SVM_EXIT_VMRUN:
2035 case SVM_EXIT_VMMCALL:
2036 case SVM_EXIT_VMLOAD:
2037 case SVM_EXIT_VMSAVE:
2038 case SVM_EXIT_STGI:
2039 case SVM_EXIT_CLGI:
2040 case SVM_EXIT_SKINIT:
2041 {
2042 /* Unsupported instructions. */
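 /* Nested SVM is not implemented, so these are reflected as #UD, much as
  * real hardware would do with EFER.SVME clear; the guest is not shown the
  * SVM feature in CPUID anyway. */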
2043 SVM_EVENT Event;
2044
2045 Event.au64[0] = 0;
2046 Event.n.u3Type = SVM_EVENT_EXCEPTION;
2047 Event.n.u1Valid = 1;
2048 Event.n.u8Vector = X86_XCPT_UD;
2049
2050 Log(("Forced #UD trap at %RGv\n", (RTGCPTR)pCtx->rip));
2051 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
2052
2053 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
2054 goto ResumeExecution;
2055 }
2056
2057 /* Emulate in ring 3. */
2058 case SVM_EXIT_MSR:
2059 {
2060 uint32_t cbSize;
2061
2062 /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play it safe by completely disassembling the instruction. */
2063 Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
2064 rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
2065 if (rc == VINF_SUCCESS)
2066 {
2067 /* EIP has been updated already. */
2068
2069 /* Only resume if successful. */
2070 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
2071 goto ResumeExecution;
2072 }
2073 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr", rc));
2074 break;
2075 }
2076
2077 case SVM_EXIT_MONITOR:
2078 case SVM_EXIT_PAUSE:
2079 case SVM_EXIT_MWAIT_UNCOND:
2080 case SVM_EXIT_MWAIT_ARMED:
2081 case SVM_EXIT_TASK_SWITCH: /* can change CR3; emulate */
2082 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
2083 break;
2084
2085 case SVM_EXIT_SHUTDOWN:
2086 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
2087 break;
2088
2089 case SVM_EXIT_IDTR_READ:
2090 case SVM_EXIT_GDTR_READ:
2091 case SVM_EXIT_LDTR_READ:
2092 case SVM_EXIT_TR_READ:
2093 case SVM_EXIT_IDTR_WRITE:
2094 case SVM_EXIT_GDTR_WRITE:
2095 case SVM_EXIT_LDTR_WRITE:
2096 case SVM_EXIT_TR_WRITE:
2097 case SVM_EXIT_CR0_SEL_WRITE:
2098 default:
2099 /* Unexpected exit codes. */
2100 rc = VERR_EM_INTERNAL_ERROR;
2101 AssertMsgFailed(("Unexpected exit code %x\n", exitCode)); /* Can't happen. */
2102 break;
2103 }
2104
2105end:
2106
2107 /* Signal changes for the recompiler. */
2108 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
2109
2110 /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
2111 if (exitCode == SVM_EXIT_INTR)
2112 {
2113 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatPendingHostIrq);
2114 /* On the next entry we'll only sync the host context. */
2115 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
2116 }
2117 else
2118 {
2119 /* On the next entry we'll sync everything. */
2120 /** @todo we can do better than this */
2121 /* Not in the VINF_PGM_CHANGE_MODE though! */
2122 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
2123 }
2124
2125 /* translate into a less severe return code */
2126 if (rc == VERR_EM_INTERPRETER)
2127 rc = VINF_EM_RAW_EMULATE_INSTR;
2128
2129 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
2130 return rc;
2131}
2132
2133/**
2134 * Enters the AMD-V session
2135 *
2136 * @returns VBox status code.
2137 * @param pVM The VM to operate on.
2138 * @param pVCpu The VM CPU to operate on.
2139 * @param pCpu CPU info struct
2140 */
2141VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
2142{
2143 Assert(pVM->hwaccm.s.svm.fSupported);
2144
2145 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hwaccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));
2146 pVCpu->hwaccm.s.fResumeVM = false;
2147
2148 /* Force a reload of LDTR, so we'll execute VMLOAD to load additional guest state. */
2149 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
2150
2151 return VINF_SUCCESS;
2152}
2153
2154
2155/**
2156 * Leaves the AMD-V session
2157 *
2158 * @returns VBox status code.
2159 * @param pVM The VM to operate on.
2160 * @param pVCpu The VM CPU to operate on.
2161 * @param pCtx CPU context
2162 */
2163VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2164{
2165 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
2166
2167 Assert(pVM->hwaccm.s.svm.fSupported);
2168
2169 /* Save the guest debug state if necessary. */
2170 if (CPUMIsGuestDebugStateActive(pVCpu))
2171 {
2172 CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, false /* skip DR6 */);
2173
2174 /* Intercept all DRx reads and writes again. Changed later on. */
2175 pVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;
2176 pVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;
2177
2178 /* Resync the debug registers the next time. */
2179 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
2180 }
2181 else
2182 Assert(pVMCB->ctrl.u16InterceptRdDRx == 0xFFFF && pVMCB->ctrl.u16InterceptWrDRx == 0xFFFF);
2183
2184 return VINF_SUCCESS;
2185}
2186
2187
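/**
 * Interprets the operand of an already-disassembled INVLPG instruction and
 * invalidates the page in both the shadow tables and the hardware TLB of the
 * given ASID.
 *
 * @returns VBox status code.
 * @param pVCpu The VM CPU to operate on.
 * @param pCpu Disassembler state of the INVLPG instruction.
 * @param pRegFrame The register frame.
 * @param uASID Tagged TLB id for the guest.
 */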
2188static int svmR0InterpretInvlPg(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
2189{
2190 OP_PARAMVAL param1;
2191 RTGCPTR addr;
2192
2193 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->param1, &param1, PARAM_SOURCE);
2194 if(RT_FAILURE(rc))
2195 return VERR_EM_INTERPRETER;
2196
2197 switch(param1.type)
2198 {
2199 case PARMTYPE_IMMEDIATE:
2200 case PARMTYPE_ADDRESS:
2201 if(!(param1.flags & (PARAM_VAL32|PARAM_VAL64)))
2202 return VERR_EM_INTERPRETER;
2203 addr = param1.val.val64;
2204 break;
2205
2206 default:
2207 return VERR_EM_INTERPRETER;
2208 }
2209
2210 /** @todo Is addr always a flat linear address, or DS-based
2211 * (in the absence of segment override prefixes)?
2212 */
2213 rc = PGMInvalidatePage(pVCpu, addr);
2214 if (RT_SUCCESS(rc))
2215 {
2216 /* Manually invalidate the page for the VM's TLB. */
2217 Log(("SVMR0InvlpgA %RGv ASID=%d\n", addr, uASID));
2218 SVMR0InvlpgA(addr, uASID);
2219 return VINF_SUCCESS;
2220 }
2221 Assert(rc == VERR_REM_FLUSHED_PAGES_OVERFLOW);
2222 return rc;
2223}
2224
2225/**
2226 * Interprets INVLPG
2227 *
2228 * @returns VBox status code.
2229 * @retval VINF_* Scheduling instructions.
2230 * @retval VERR_EM_INTERPRETER Something we can't cope with.
2231 * @retval VERR_* Fatal errors.
2232 *
2233 * @param pVM The VM handle.
 * @param pVCpu The VM CPU to operate on.
2234 * @param pRegFrame The register frame.
2235 * @param uASID Tagged TLB id for the guest.
2236 *
2237 * Updates the EIP if an instruction was executed successfully.
2238 */
2239static int SVMR0InterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
2240{
2241 /*
2242 * Only allow 32-bit and 64-bit code.
2243 */
2244 DISCPUMODE enmMode = SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid);
2245 if (enmMode != CPUMODE_16BIT)
2246 {
2247 RTGCPTR pbCode;
2248 int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->rip, &pbCode);
2249 if (RT_SUCCESS(rc))
2250 {
2251 uint32_t cbOp;
2252 DISCPUSTATE Cpu;
2253
2254 Cpu.mode = enmMode;
2255 rc = EMInterpretDisasOneEx(pVM, pVCpu, pbCode, pRegFrame, &Cpu, &cbOp);
2256 Assert(RT_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
2257 if (RT_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
2258 {
2259 Assert(cbOp == Cpu.opsize);
2260 rc = svmR0InterpretInvlPg(pVCpu, &Cpu, pRegFrame, uASID);
2261 if (RT_SUCCESS(rc))
2262 {
2263 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
2264 }
2265 return rc;
2266 }
2267 }
2268 }
2269 return VERR_EM_INTERPRETER;
2270}
2271
2272
2273/**
2274 * Invalidates a guest page
2275 *
2276 * @returns VBox status code.
2277 * @param pVM The VM to operate on.
2278 * @param pVCpu The VM CPU to operate on.
2279 * @param GCVirt Page to invalidate
2280 */
2281VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
2282{
2283 bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVCpu->hwaccm.s.fForceTLBFlush;
2284
2285 /* Skip it if a TLB flush is already pending. */
2286 if (!fFlushPending)
2287 {
2288 SVM_VMCB *pVMCB;
2289
2290 Log2(("SVMR0InvalidatePage %RGv\n", GCVirt));
2291 AssertReturn(pVM, VERR_INVALID_PARAMETER);
2292 Assert(pVM->hwaccm.s.svm.fSupported);
2293
2294 /* @todo SMP */
2295 pVMCB = (SVM_VMCB *)pVM->aCpus[0].hwaccm.s.svm.pVMCB;
2296 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
2297
2298 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
2299#if HC_ARCH_BITS == 32
2300 /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invlpga takes only 32 bits addresses. */
2301 if (CPUMIsGuestInLongMode(pVCpu))
2302 pVCpu->hwaccm.s.fForceTLBFlush = true;
2303 else
2304#endif
2305 SVMR0InvlpgA(GCVirt, pVMCB->ctrl.TLBCtrl.n.u32ASID);
2306 }
2307 return VINF_SUCCESS;
2308}
2309
2310
2311/**
2312 * Invalidates a guest page by physical address
2313 *
2314 * @returns VBox status code.
2315 * @param pVM The VM to operate on.
2316 * @param pVCpu The VM CPU to operate on.
2317 * @param GCPhys Page to invalidate
2318 */
2319VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
2320{
2321 Assert(pVM->hwaccm.s.fNestedPaging);
2322 /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
2323 pVCpu->hwaccm.s.fForceTLBFlush = true;
2324 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBInvlpga);
2325 return VINF_SUCCESS;
2326}
2327
2328#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2329/**
2330 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
2331 *
2332 * @returns VBox status code.
2333 * @param pVMCBHostPhys Physical address of host VMCB.
2334 * @param pVMCBPhys Physical address of the VMCB.
2335 * @param pCtx Guest context.
2336 * @param pVM The VM to operate on.
2337 * @param pVCpu The VMCPU to operate on.
2338 */
2339DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
2340{
2341 uint32_t aParam[4];
2342
2343 aParam[0] = (uint32_t)(pVMCBHostPhys); /* Param 1: pVMCBHostPhys - Lo. */
2344 aParam[1] = (uint32_t)(pVMCBHostPhys >> 32); /* Param 1: pVMCBHostPhys - Hi. */
2345 aParam[2] = (uint32_t)(pVMCBPhys); /* Param 2: pVMCBPhys - Lo. */
2346 aParam[3] = (uint32_t)(pVMCBPhys >> 32); /* Param 2: pVMCBPhys - Hi. */
2347
2348 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
2349}
2350
2351/**
2352 * Executes the specified handler in 64-bit mode.
2353 *
2354 * @returns VBox status code.
2355 * @param pVM The VM to operate on.
2356 * @param pVCpu The VMCPU to operate on.
2357 * @param pCtx Guest context
2358 * @param pfnHandler RC handler
2359 * @param cbParam Number of parameters.
2360 * @param paParam Array of 32-bit parameters.
2361 */
2362VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
2363{
2364 int rc;
2365 RTHCUINTREG uFlags;
2366
2367 /* @todo This code is not guest SMP safe (hyper stack) */
2368 AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
2369 Assert(pfnHandler);
2370
2371 uFlags = ASMIntDisableFlags();
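 /* Keep interrupts disabled across the world switch; the handler runs on the
  * shared hyper stack (see the SMP todo above) and must not be preempted. */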
2372
2373 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVM));
2374 CPUMSetHyperEIP(pVCpu, pfnHandler);
2375 for (int i = (int)cbParam - 1; i >= 0; i--)
2376 CPUMPushHyper(pVCpu, paParam[i]);
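 /* The parameters were pushed in reverse order, so paParam[0] ends up on top
  * of the hyper stack and is consumed first by the handler. */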
2377
2378 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
2379 /* Call switcher. */
2380 rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM);
2381 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
2382
2383 ASMSetFlags(uFlags);
2384 return rc;
2385}
2386
2387#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */