VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp@8863

Last change on this file since 8863 was 8659, checked in by vboxsync, 17 years ago

Updates for 64-bit paging.
Removed conditional dirty and accessed bit syncing; it doesn't make sense not to do this.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 78.6 KB
1/* $Id: HWVMXR0.cpp 8659 2008-05-07 14:39:41Z vboxsync $ */
2/** @file
3 * HWACCM VMX - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_HWACCM
27#include <VBox/hwaccm.h>
28#include "HWACCMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/x86.h>
31#include <VBox/pgm.h>
32#include <VBox/pdm.h>
33#include <VBox/err.h>
34#include <VBox/log.h>
35#include <VBox/selm.h>
36#include <VBox/iom.h>
37#include <iprt/param.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#include <iprt/string.h>
41#include "HWVMXR0.h"
42
43
44/* IO operation lookup arrays. */
45static uint32_t aIOSize[4] = {1, 2, 0, 4};
46static uint32_t aIOOpAnd[4] = {0xff, 0xffff, 0, 0xffffffff};
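/* Illustrative decode (a sketch, not code from this file): the low bits of the
 * I/O VM-exit qualification encode the access size as 0 (byte), 1 (word) or
 * 3 (dword); encoding 2 is invalid, hence the zero entries above. A handler
 * would use the arrays roughly like this:
 *
 *   uint32_t uIOWidth = exitQualification & 7;    // hypothetical decode
 *   uint32_t cbSize   = aIOSize[uIOWidth];        // bytes per transfer
 *   uint32_t uAndVal  = aIOOpAnd[uIOWidth];       // mask for AL/AX/EAX
 */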
47
48
49static void VMXR0CheckError(PVM pVM, int rc)
50{
51 if (rc == VERR_VMX_GENERIC)
52 {
53 RTCCUINTREG instrError;
54
55 VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
56 pVM->hwaccm.s.vmx.ulLastInstrError = instrError;
57 }
58 pVM->hwaccm.s.lLastError = rc;
59}
60
61/**
62 * Sets up and activates VT-x on the current CPU
63 *
64 * @returns VBox status code.
65 * @param idCpu The identifier for the CPU the function is called on.
66 * @param pVM The VM to operate on.
67 * @param pvPageCpu Pointer to the global cpu page
68 * @param pPageCpuPhys Physical address of the global cpu page
69 */
70HWACCMR0DECL(int) VMXR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
71{
72 AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
73 AssertReturn(pVM, VERR_INVALID_PARAMETER);
74 AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
75
76 /* Setup Intel VMX. */
77 Assert(pVM->hwaccm.s.vmx.fSupported);
78
79#ifdef LOG_ENABLED
80 SUPR0Printf("VMXR0EnableCpu cpu %d page (%x) %x\n", idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
81#endif
82 /* Set revision dword at the beginning of the VMXON structure. */
83 *(uint32_t *)pvPageCpu = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
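/* For reference: bits 30:0 of the IA32_VMX_BASIC MSR hold the VMCS/VMXON
 * revision identifier, which the CPU validates on VMXON/VMPTRLD; that is
 * what the MSR_IA32_VMX_BASIC_INFO_VMCS_ID macro extracts above. */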
84
85 /* @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
86 * (which can have very bad consequences!!!)
87 */
88
89 /* Make sure the VMX instructions don't cause #UD faults. */
90 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
91
92 /* Enter VMX Root Mode */
93 int rc = VMXEnable(pPageCpuPhys);
94 if (VBOX_FAILURE(rc))
95 {
96 VMXR0CheckError(pVM, rc);
97 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
98 return VERR_VMX_VMXON_FAILED;
99 }
100 return VINF_SUCCESS;
101}
102
103/**
104 * Deactivates VT-x on the current CPU
105 *
106 * @returns VBox status code.
107 * @param idCpu The identifier for the CPU the function is called on.
108 * @param pvPageCpu Pointer to the global cpu page
109 * @param pPageCpuPhys Physical address of the global cpu page
110 */
111HWACCMR0DECL(int) VMXR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
112{
113 AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
114 AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
115
116 /* Leave VMX Root Mode. */
117 VMXDisable();
118
119 /* And clear the X86_CR4_VMXE bit */
120 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
121
122#ifdef LOG_ENABLED
123 SUPR0Printf("VMXR0DisableCpu cpu %d\n", idCpu);
124#endif
125 return VINF_SUCCESS;
126}
127
128/**
129 * Does Ring-0 per VM VT-x init.
130 *
131 * @returns VBox status code.
132 * @param pVM The VM to operate on.
133 */
134HWACCMR0DECL(int) VMXR0InitVM(PVM pVM)
135{
136 int rc;
137
138#ifdef LOG_ENABLED
139 SUPR0Printf("VMXR0InitVM %x\n", pVM);
140#endif
141
142 /* Allocate one page for the VM control structure (VMCS). */
143 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjVMCS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
144 AssertRC(rc);
145 if (RT_FAILURE(rc))
146 return rc;
147
148 pVM->hwaccm.s.vmx.pVMCS = RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjVMCS);
149 pVM->hwaccm.s.vmx.pVMCSPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjVMCS, 0);
150 ASMMemZero32(pVM->hwaccm.s.vmx.pVMCS, PAGE_SIZE);
151
152 /* Allocate one page for the TSS we need for real mode emulation. */
153 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjRealModeTSS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
154 AssertRC(rc);
155 if (RT_FAILURE(rc))
156 return rc;
157
158 pVM->hwaccm.s.vmx.pRealModeTSS = (PVBOXTSS)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjRealModeTSS);
159 pVM->hwaccm.s.vmx.pRealModeTSSPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjRealModeTSS, 0);
160
161 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
162 * for I/O operations. */
163 ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, PAGE_SIZE);
164 pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
165 /* Bit set to 0 means redirection enabled. */
166 memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
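/* Illustration (a sketch, not code from this file): with every bit clear, all
 * software interrupts in v86 mode are redirected to the guest's own real mode
 * IVT. To force a single vector back through the protected mode path instead,
 * its bit would be set, e.g.:
 *
 *   ASMBitSet(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, u8Vector);
 */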
167
168#ifdef LOG_ENABLED
169 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x) RealModeTSS=%x (%x)\n", pVM, pVM->hwaccm.s.vmx.pVMCS, (uint32_t)pVM->hwaccm.s.vmx.pVMCSPhys, pVM->hwaccm.s.vmx.pRealModeTSS, (uint32_t)pVM->hwaccm.s.vmx.pRealModeTSSPhys);
170#endif
171 return VINF_SUCCESS;
172}
173
174/**
175 * Does Ring-0 per VM VT-x termination.
176 *
177 * @returns VBox status code.
178 * @param pVM The VM to operate on.
179 */
180HWACCMR0DECL(int) VMXR0TermVM(PVM pVM)
181{
182 if (pVM->hwaccm.s.vmx.pMemObjVMCS)
183 {
184 RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjVMCS, false);
185 pVM->hwaccm.s.vmx.pMemObjVMCS = 0;
186 pVM->hwaccm.s.vmx.pVMCS = 0;
187 pVM->hwaccm.s.vmx.pVMCSPhys = 0;
188 }
189 if (pVM->hwaccm.s.vmx.pMemObjRealModeTSS)
190 {
191 RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjRealModeTSS, false);
192 pVM->hwaccm.s.vmx.pMemObjRealModeTSS = 0;
193 pVM->hwaccm.s.vmx.pRealModeTSS = 0;
194 pVM->hwaccm.s.vmx.pRealModeTSSPhys = 0;
195 }
196 return VINF_SUCCESS;
197}
198
199/**
200 * Sets up VT-x for the specified VM
201 *
202 * @returns VBox status code.
203 * @param pVM The VM to operate on.
204 */
205HWACCMR0DECL(int) VMXR0SetupVM(PVM pVM)
206{
207 int rc = VINF_SUCCESS;
208 uint32_t val;
209
210 AssertReturn(pVM, VERR_INVALID_PARAMETER);
211 Assert(pVM->hwaccm.s.vmx.pVMCS);
212
213 /* Set revision dword at the beginning of the VMCS structure. */
214 *(uint32_t *)pVM->hwaccm.s.vmx.pVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
215
216 /* Clear VM Control Structure. */
217 Log(("pVMCSPhys = %VHp\n", pVM->hwaccm.s.vmx.pVMCSPhys));
218 rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
219 if (VBOX_FAILURE(rc))
220 goto vmx_end;
221
222 /* Activate the VM Control Structure. */
223 rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
224 if (VBOX_FAILURE(rc))
225 goto vmx_end;
226
227 /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
228 * Set required bits to one and zero according to the MSR capabilities.
229 */
230 val = (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF);
231 /* External and non-maskable interrupts cause VM-exits. */
232 val = val | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
233 val &= (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL);
234
235 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val);
236 AssertRC(rc);
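/* A sketch of the allowed-0/allowed-1 pattern used above and below (the
 * helper name is hypothetical): the low dword of each capability MSR holds
 * the bits that must be one, the high dword the bits that may be one.
 *
 *   static uint32_t vmxAdjustCtls(uint64_t u64MsrCaps, uint32_t fDesired)
 *   {
 *       uint32_t fVal = fDesired | (uint32_t)u64MsrCaps;   // force must-be-one bits
 *       fVal &= (uint32_t)(u64MsrCaps >> 32);              // drop unsupported bits
 *       return fVal;
 *   }
 */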
237
238 /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
239 * Set required bits to one and zero according to the MSR capabilities.
240 */
241 val = (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF);
242 /* Program which events cause VM-exits and which features we want to use. */
243 val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
244 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
245 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
246 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
247 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
248 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
249
250 /** @note VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */
251
252 /*
253 if AMD64 guest mode
254 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT
255 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT;
256 */
257#if HC_ARCH_BITS == 64
258 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT
259 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT;
260#endif
261 /* Mask away the bits that the CPU doesn't support */
262 /** @todo make sure they don't conflict with the above requirements. */
263 val &= (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL);
264 pVM->hwaccm.s.vmx.proc_ctls = val;
265
266 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val);
267 AssertRC(rc);
268
269 /* VMX_VMCS_CTRL_CR3_TARGET_COUNT
270 * Set required bits to one and zero according to the MSR capabilities.
271 */
272 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR3_TARGET_COUNT, 0);
273 AssertRC(rc);
274
275 /* VMX_VMCS_CTRL_ENTRY_CONTROLS
276 * Set required bits to one and zero according to the MSR capabilities.
277 */
278 val = (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF);
279 if (pVM->hwaccm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
280 {
281 /** @todo 32-bit guest mode only for now. */
282 /* val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE; */
283 }
284 /* Mask away the bits that the CPU doesn't support */
285 /** @todo make sure they don't conflict with the above requirements. */
286 val &= (pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL);
287 /* Otherwise VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE must be zero (AMD64 not available). */
288 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
289 AssertRC(rc);
290
291 /* VMX_VMCS_CTRL_EXIT_CONTROLS
292 * Set required bits to one and zero according to the MSR capabilities.
293 */
294 val = (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF);
295#if HC_ARCH_BITS == 64
296 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
297#else
298 /* VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 must be zero on a 32-bit host. */
299#endif
300 val &= (pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL);
301 /* Don't acknowledge external interrupts on VM-exit. */
302 rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
303 AssertRC(rc);
304
305 /* Forward all exceptions except #NM & #PF to the guest.
306 * We always need to check pagefaults since our shadow page table can be out of sync.
307 * And we always lazily sync the FPU & XMM state.
308 */
309
310 /*
311 * @todo Possible optimization:
312 * Keep the FPU and XMM state current in the EM thread. That way there's no need to
313 * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
314 * registers ourselves of course.
315 *
316 * @note only possible if the current state is actually ours (X86_CR0_TS flag)
317 */
318 rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK);
319 AssertRC(rc);
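/* For reference (illustrative values, not the actual mask definition): the
 * exception bitmap has one bit per vector; bit n set means vector n causes a
 * VM-exit. The minimal mask implied by the comment above would be:
 *
 *   uint32_t fXcptMask = RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF);
 *
 * HWACCM_VMX_TRAP_MASK is assumed to include at least these two bits.
 */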
320
321 /* Don't filter page faults; all of them should cause a switch. */
322 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK, 0);
323 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);
324 AssertRC(rc);
325
326 /* Init TSC offset to zero. */
327 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
328#if HC_ARCH_BITS == 32
329 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, 0);
330#endif
331 AssertRC(rc);
332
333 rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
334#if HC_ARCH_BITS == 32
335 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_HIGH, 0);
336#endif
337 AssertRC(rc);
338
339 rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
340#if HC_ARCH_BITS == 32
341 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_HIGH, 0);
342#endif
343 AssertRC(rc);
344
345 /* Clear MSR controls. */
346 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
347 {
348 /* Optional */
349 rc = VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_FULL, 0);
350#if HC_ARCH_BITS == 32
351 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_HIGH, 0);
352#endif
353 AssertRC(rc);
354 }
355 rc = VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
356 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
357 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
358#if HC_ARCH_BITS == 32
359 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_HIGH, 0);
360 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_HIGH, 0);
361 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_HIGH, 0);
362#endif
363 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
364 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
365 AssertRC(rc);
366
367 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
368 {
369 /* Optional */
370 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_TRESHOLD, 0);
371 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, 0);
372#if HC_ARCH_BITS == 32
373 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_HIGH, 0);
374#endif
375 AssertRC(rc);
376 }
377
378 /* Set link pointer to -1. Not currently used. */
379#if HC_ARCH_BITS == 32
380 rc = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFF);
381 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_HIGH, 0xFFFFFFFF);
382#else
383 rc = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFF);
384#endif
385 AssertRC(rc);
386
387 /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
388 rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
389 AssertRC(rc);
390
391vmx_end:
392 VMXR0CheckError(pVM, rc);
393 return rc;
394}
395
396
397/**
398 * Injects an event (trap or external interrupt)
399 *
400 * @returns VBox status code.
401 * @param pVM The VM to operate on.
402 * @param pCtx CPU Context
403 * @param intInfo VMX interrupt info
404 * @param cbInstr Opcode length of faulting instruction
405 * @param errCode Error code (optional)
406 */
407static int VMXR0InjectEvent(PVM pVM, CPUMCTX *pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
408{
409 int rc;
410
411#ifdef VBOX_STRICT
412 uint32_t iGate = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
413 if (iGate == 0xE)
414 Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", iGate, pCtx->eip, errCode, pCtx->cr2, intInfo));
415 else
416 if (iGate < 0x20)
417 Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x\n", iGate, pCtx->eip, errCode));
418 else
419 {
420 Log2(("INJ-EI: %x at %VGv\n", iGate, pCtx->eip));
421 Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
422 Assert(pCtx->eflags.u32 & X86_EFL_IF);
423 }
424#endif
425
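/* For reference, the VM-entry interruption information layout (Intel SDM):
 * bits 7:0 vector, bits 10:8 type, bit 11 deliver-error-code, bit 31 valid.
 * E.g. a hardware exception #GP (0x0D) with an error code encodes as
 * 0x80000B0D = VALID | ERRCODE_VALID | (HWEXCPT << 8) | 0x0D (illustrative). */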
426 /* Set event injection state. */
427 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_IRQ_INFO,
428 intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT)
429 );
430
431 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
432 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode);
433
434 AssertRC(rc);
435 return rc;
436}
437
438
439/**
440 * Checks for pending guest interrupts and injects them
441 *
442 * @returns VBox status code.
443 * @param pVM The VM to operate on.
444 * @param pCtx CPU Context
445 */
446static int VMXR0CheckPendingInterrupt(PVM pVM, CPUMCTX *pCtx)
447{
448 int rc;
449
450 /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
451 if (pVM->hwaccm.s.Event.fPending)
452 {
453 Log(("Reinjecting event %VX64 %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->eip));
454 STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
455 rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
456 AssertRC(rc);
457
458 pVM->hwaccm.s.Event.fPending = false;
459 return VINF_SUCCESS;
460 }
461
462 /* When external interrupts are pending, we should exit the VM when IF is set. */
463 if ( !TRPMHasTrap(pVM)
464 && VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
465 {
466 if (!(pCtx->eflags.u32 & X86_EFL_IF))
467 {
468 Log2(("Enable irq window exit!\n"));
469 pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
470 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
471 AssertRC(rc);
472 }
473 else
474 if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
475 {
476 uint8_t u8Interrupt;
477
478 rc = PDMGetInterrupt(pVM, &u8Interrupt);
479 Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
480 if (VBOX_SUCCESS(rc))
481 {
482 rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
483 AssertRC(rc);
484 }
485 else
486 {
487 /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
488 Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
489 STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
490 /* Just continue */
491 }
492 }
493 else
494 Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->eip));
495 }
496
497#ifdef VBOX_STRICT
498 if (TRPMHasTrap(pVM))
499 {
500 uint8_t u8Vector;
501 rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
502 AssertRC(rc);
503 }
504#endif
505
506 if ( pCtx->eflags.u32 & X86_EFL_IF
507 && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
508 && TRPMHasTrap(pVM)
509 )
510 {
511 uint8_t u8Vector;
512 int rc;
513 TRPMEVENT enmType;
514 RTGCUINTPTR intInfo, errCode;
515
516 /* If a new event is pending, then dispatch it now. */
517 rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &errCode, 0);
518 AssertRC(rc);
519 Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
520 Assert(enmType != TRPM_SOFTWARE_INT);
521
522 /* Clear the pending trap. */
523 rc = TRPMResetTrap(pVM);
524 AssertRC(rc);
525
526 intInfo = u8Vector;
527 intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
528
529 if (enmType == TRPM_TRAP)
530 {
531 switch (u8Vector) {
532 case 8:
533 case 10:
534 case 11:
535 case 12:
536 case 13:
537 case 14:
538 case 17:
539 /* Valid error codes. */
540 intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
541 break;
542 default:
543 break;
544 }
545 if (u8Vector == X86_XCPT_BP || u8Vector == X86_XCPT_OF)
546 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
547 else
548 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
549 }
550 else
551 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
552
553 STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
554 rc = VMXR0InjectEvent(pVM, pCtx, intInfo, 0, errCode);
555 AssertRC(rc);
556 } /* if (interrupts can be dispatched) */
557
558 return VINF_SUCCESS;
559}
560
561/**
562 * Save the host state
563 *
564 * @returns VBox status code.
565 * @param pVM The VM to operate on.
566 */
567HWACCMR0DECL(int) VMXR0SaveHostState(PVM pVM)
568{
569 int rc = VINF_SUCCESS;
570
571 /*
572 * Host CPU Context
573 */
574 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
575 {
576 RTIDTR idtr;
577 RTGDTR gdtr;
578 RTSEL SelTR;
579 PX86DESCHC pDesc;
580 uintptr_t trBase;
581
582 /* Control registers */
583 rc = VMXWriteVMCS(VMX_VMCS_HOST_CR0, ASMGetCR0());
584 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3, ASMGetCR3());
585 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4, ASMGetCR4());
586 AssertRC(rc);
587 Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0()));
588 Log2(("VMX_VMCS_HOST_CR3 %VHp\n", ASMGetCR3()));
589 Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4()));
590
591 /* Selector registers. */
592 rc = VMXWriteVMCS(VMX_VMCS_HOST_FIELD_CS, ASMGetCS());
593 /** @note VMX is (again) very picky about the RPL of the selectors here; we'll restore them manually. */
594 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_DS, 0);
595 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_ES, 0);
596#if HC_ARCH_BITS == 32
597 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_FS, 0);
598 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_GS, 0);
599#endif
600 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_SS, ASMGetSS());
601 SelTR = ASMGetTR();
602 rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_TR, SelTR);
603 AssertRC(rc);
604 Log2(("VMX_VMCS_HOST_FIELD_CS %08x\n", ASMGetCS()));
605 Log2(("VMX_VMCS_HOST_FIELD_DS %08x\n", ASMGetDS()));
606 Log2(("VMX_VMCS_HOST_FIELD_ES %08x\n", ASMGetES()));
607 Log2(("VMX_VMCS_HOST_FIELD_FS %08x\n", ASMGetFS()));
608 Log2(("VMX_VMCS_HOST_FIELD_GS %08x\n", ASMGetGS()));
609 Log2(("VMX_VMCS_HOST_FIELD_SS %08x\n", ASMGetSS()));
610 Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR()));
611
612 /* GDTR & IDTR */
613 ASMGetGDTR(&gdtr);
614 rc = VMXWriteVMCS(VMX_VMCS_HOST_GDTR_BASE, gdtr.pGdt);
615 ASMGetIDTR(&idtr);
616 rc |= VMXWriteVMCS(VMX_VMCS_HOST_IDTR_BASE, idtr.pIdt);
617 AssertRC(rc);
618 Log2(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", gdtr.pGdt));
619 Log2(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", idtr.pIdt));
620
621 /* Save the base address of the TR selector. */
622 if (SelTR > gdtr.cbGdt)
623 {
624 AssertMsgFailed(("Invalid TR selector %x. GDTR.cbGdt=%x\n", SelTR, gdtr.cbGdt));
625 return VERR_VMX_INVALID_HOST_STATE;
626 }
627
628 pDesc = &((PX86DESCHC)gdtr.pGdt)[SelTR >> X86_SEL_SHIFT_HC];
629#if HC_ARCH_BITS == 64
630 trBase = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16ULL) | (pDesc->Gen.u8BaseHigh2 << 24ULL) | ((uintptr_t)pDesc->Gen.u32BaseHigh3 << 32ULL);
631#else
632 trBase = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
633#endif
634 rc = VMXWriteVMCS(VMX_VMCS_HOST_TR_BASE, trBase);
635 AssertRC(rc);
636 Log2(("VMX_VMCS_HOST_TR_BASE %VHv\n", trBase));
637
638 /* FS and GS base. */
639#if HC_ARCH_BITS == 64
640 Log2(("MSR_K8_FS_BASE = %VHv\n", ASMRdMsr(MSR_K8_FS_BASE)));
641 Log2(("MSR_K8_GS_BASE = %VHv\n", ASMRdMsr(MSR_K8_GS_BASE)));
642 rc = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE));
643 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE));
644#endif
645 AssertRC(rc);
646
647 /* Sysenter MSRs. */
648 /** @todo expensive!! */
649 rc = VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
650 Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
651#if HC_ARCH_BITS == 32
652 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
653 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
654 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
655 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
656#else
657 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
658 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
659 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
660 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
661#endif
662 AssertRC(rc);
663
664 pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
665 }
666 return rc;
667}
668
669
670/**
671 * Loads the guest state
672 *
673 * @returns VBox status code.
674 * @param pVM The VM to operate on.
675 * @param pCtx Guest context
676 */
677HWACCMR0DECL(int) VMXR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
678{
679 int rc = VINF_SUCCESS;
680 RTGCUINTPTR val;
681 X86EFLAGS eflags;
682
683 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
684 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
685 {
686 VMX_WRITE_SELREG(ES, es);
687 AssertRC(rc);
688
689 VMX_WRITE_SELREG(CS, cs);
690 AssertRC(rc);
691
692 VMX_WRITE_SELREG(SS, ss);
693 AssertRC(rc);
694
695 VMX_WRITE_SELREG(DS, ds);
696 AssertRC(rc);
697
698 VMX_WRITE_SELREG(FS, fs);
699 AssertRC(rc);
700
701 VMX_WRITE_SELREG(GS, gs);
702 AssertRC(rc);
703 }
704
705 /* Guest CPU context: LDTR. */
706 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
707 {
708 if (pCtx->ldtr == 0)
709 {
710 rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, 0);
711 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, 0);
712 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, 0);
713 /** @note vmlaunch will fail with 0 or just 0x02. No idea why. */
714 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x82 /* present, LDT */);
715 }
716 else
717 {
718 rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, pCtx->ldtr);
719 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, pCtx->ldtrHid.u32Limit);
720 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtrHid.u32Base);
721 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, pCtx->ldtrHid.Attr.u);
722 }
723 AssertRC(rc);
724 }
725 /* Guest CPU context: TR. */
726 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
727 {
728 rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_TR, pCtx->tr);
729
730 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
731 if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
732 {
733 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
734 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, 0);
735 }
736 else
737 {
738 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, pCtx->trHid.u32Limit);
739 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, pCtx->trHid.u32Base);
740 }
741 val = pCtx->trHid.Attr.u;
742
743 /* The TSS selector must be busy. */
744 if ((val & 0xF) == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
745 val = (val & ~0xF) | X86_SEL_TYPE_SYS_286_TSS_BUSY;
746 else
747 /* Default even if no TR selector has been set (otherwise vmlaunch will fail!) */
748 val = (val & ~0xF) | X86_SEL_TYPE_SYS_386_TSS_BUSY;
749
750 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_ACCESS_RIGHTS, val);
751 AssertRC(rc);
752 }
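/* For reference: the low type nibble is 1/3 for an available/busy 16-bit TSS
 * and 9/11 for an available/busy 32-bit TSS; VT-x refuses to launch a guest
 * whose TR access rights describe an available (non-busy) TSS. */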
753 /* Guest CPU context: GDTR. */
754 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
755 {
756 rc = VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
757 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
758 AssertRC(rc);
759 }
760 /* Guest CPU context: IDTR. */
761 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
762 {
763 rc = VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
764 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
765 AssertRC(rc);
766 }
767
768 /*
769 * Sysenter MSRs
770 */
771 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SYSENTER_MSR)
772 {
773 rc = VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
774 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
775 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
776 AssertRC(rc);
777 }
778
779 /* Control registers */
780 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
781 {
782 val = pCtx->cr0;
783 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, val);
784 Log2(("Guest CR0-shadow %08x\n", val));
785 if (CPUMIsGuestFPUStateActive(pVM) == false)
786 {
787 /* Always use #NM exceptions to load the FPU/XMM state on demand. */
788 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
789 }
790 else
791 {
792 Assert(pVM->hwaccm.s.vmx.fResumeVM == true);
793 /** @todo check if we support the old style mess correctly. */
794 if (!(val & X86_CR0_NE))
795 {
796 Log(("Forcing X86_CR0_NE!!!\n"));
797
798 /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
799 if (!pVM->hwaccm.s.fFPUOldStyleOverride)
800 {
801 rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK | RT_BIT(16));
802 AssertRC(rc);
803 pVM->hwaccm.s.fFPUOldStyleOverride = true;
804 }
805 }
806
807 val |= X86_CR0_NE; /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
808 }
809 /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
810 val |= X86_CR0_PE | X86_CR0_PG;
811 /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
812 val |= X86_CR0_WP;
813
814 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR0, val);
815 Log2(("Guest CR0 %08x\n", val));
816 /* CR0 flags owned by the host; if the guest attempts to change them, then
817 * the VM will exit.
818 */
819 val = X86_CR0_PE /* Must monitor this bit (assumptions are made for real mode emulation) */
820 | X86_CR0_WP /* Must monitor this bit (it must always be enabled). */
821 | X86_CR0_PG /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
822 | X86_CR0_TS
823 | X86_CR0_ET
824 | X86_CR0_NE
825 | X86_CR0_MP;
826 pVM->hwaccm.s.vmx.cr0_mask = val;
827
828 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
829 Log2(("Guest CR0-mask %08x\n", val));
830 AssertRC(rc);
831 }
832 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
833 {
834 /* CR4 */
835 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4);
836 Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
837 /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
838 val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
839 switch(pVM->hwaccm.s.enmShadowMode)
840 {
841 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */
842 case PGMMODE_PROTECTED: /* Protected mode, no paging -> emulated using identity mapping. */
843 case PGMMODE_32_BIT: /* 32-bit paging. */
844 break;
845
846 case PGMMODE_PAE: /* PAE paging. */
847 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
848 /** @todo use normal 32-bit paging */
849 val |= X86_CR4_PAE;
850 break;
851
852 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
853 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
854 AssertFailed();
855 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
856
857 default: /* shut up gcc */
858 AssertFailed();
859 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
860 }
861 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
862 if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
863 val |= X86_CR4_VME;
864
865 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR4, val);
866 Log2(("Guest CR4 %08x\n", val));
867 /* CR4 flags owned by the host; if the guest attempts to change them, then
868 * the VM will exit.
869 */
870 val = X86_CR4_PAE
871 | X86_CR4_PGE
872 | X86_CR4_PSE
873 | X86_CR4_VMXE;
874 pVM->hwaccm.s.vmx.cr4_mask = val;
875
876 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
877 Log2(("Guest CR4-mask %08x\n", val));
878 AssertRC(rc);
879 }
880
881 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
882 {
883 /* Save our shadow CR3 register. */
884 val = PGMGetHyperCR3(pVM);
885 rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
886 AssertRC(rc);
887 }
888
889 /* Debug registers. */
890 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
891 {
892 /** @todo DR0-6 */
893 val = pCtx->dr7;
894 val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
895 val |= 0x400; /* must be one */
896#ifdef VBOX_STRICT
897 val = 0x400;
898#endif
899 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DR7, val);
900 AssertRC(rc);
901
902 /* IA32_DEBUGCTL MSR. */
903 rc = VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_FULL, 0);
904 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_HIGH, 0);
905 AssertRC(rc);
906
907 /** @todo */
908 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0);
909 AssertRC(rc);
910 }
911
912 /* EIP, ESP and EFLAGS */
913 rc = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->eip);
914 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->esp);
915 AssertRC(rc);
916
917 /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */
918 eflags = pCtx->eflags;
919 eflags.u32 &= VMX_EFLAGS_RESERVED_0;
920 eflags.u32 |= VMX_EFLAGS_RESERVED_1;
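/* Illustration of the two masks (values follow from the x86 EFLAGS layout;
 * the literal below is derived, not copied from the headers):
 *   eflags.u32 &= ~UINT32_C(0xFFC08028);  // clear bits 3, 5, 15, 22-31
 *   eflags.u32 |= RT_BIT(1);              // bit 1 is always one
 */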
921
922 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
923 if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
924 {
925 eflags.Bits.u1VM = 1;
926 eflags.Bits.u1VIF = pCtx->eflags.Bits.u1IF;
927 eflags.Bits.u2IOPL = 3;
928 }
929
930 rc = VMXWriteVMCS(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
931 AssertRC(rc);
932
933 /** TSC offset. */
934 uint64_t u64TSCOffset;
935
936 if (TMCpuTickCanUseRealTSC(pVM, &u64TSCOffset))
937 {
938 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
939#if HC_ARCH_BITS == 64
940 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, u64TSCOffset);
941#else
942 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, (uint32_t)u64TSCOffset);
943 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, (uint32_t)(u64TSCOffset >> 32ULL));
944#endif
945 AssertRC(rc);
946
947 pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
948 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
949 AssertRC(rc);
950 }
951 else
952 {
953 pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
954 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
955 AssertRC(rc);
956 }
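/* For reference (VMX TSC offsetting semantics): with RDTSC exiting clear and
 * TSC offsetting enabled, a guest RDTSC observes
 *     guest_tsc = host_tsc + VMX_VMCS_CTRL_TSC_OFFSET
 * which is the offset TMCpuTickCanUseRealTSC computed above. */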
957
958 /* Done. */
959 pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
960
961 return rc;
962}
963
964/**
965 * Runs guest code in a VMX VM.
966 *
967 * @note NEVER EVER turn on interrupts here. Due to our illegal entry into the kernel, it might mess things up. (XP kernel traps have been frequently observed)
968 *
969 * @returns VBox status code.
970 * @param pVM The VM to operate on.
971 * @param pCtx Guest context
972 */
973HWACCMR0DECL(int) VMXR0RunGuestCode(PVM pVM, CPUMCTX *pCtx)
974{
975 int rc = VINF_SUCCESS;
976 RTCCUINTREG val, valShadow;
977 RTCCUINTREG exitReason, instrError, cbInstr;
978 RTGCUINTPTR exitQualification;
979 RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
980 RTGCUINTPTR errCode, instrInfo, uInterruptState;
981 bool fGuestStateSynced = false;
982 unsigned cResume = 0;
983
984 Log2(("\nE"));
985
986 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);
987
988#ifdef VBOX_STRICT
989 rc = VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
990 AssertRC(rc);
991 Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val));
992
993 /* allowed zero */
994 if ((val & (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF))
995 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
996
997 /* allowed one */
998 if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL)) != 0)
999 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
1000
1001 rc = VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
1002 AssertRC(rc);
1003 Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val));
1004
1005 /* allowed zero */
1006 if ((val & (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF))
1007 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
1008
1009 /* allowed one */
1010 if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL)) != 0)
1011 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
1012
1013 rc = VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
1014 AssertRC(rc);
1015 Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val));
1016
1017 /* allowed zero */
1018 if ((val & (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF))
1019 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
1020
1021 /* allowed one */
1022 if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL)) != 0)
1023 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
1024
1025 rc = VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
1026 AssertRC(rc);
1027 Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val));
1028
1029 /* allowed zero */
1030 if ((val & (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF))
1031 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
1032
1033 /* allowed one */
1034 if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL)) != 0)
1035 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
1036#endif
1037
1038#if 0
1039 /*
1040 * Check if debug registers are armed.
1041 */
1042 uint32_t u32DR7 = ASMGetDR7();
1043 if (u32DR7 & X86_DR7_ENABLED_MASK)
1044 {
1045 pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
1046 }
1047 else
1048 pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HOST;
1049#endif
1050
1051 /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
1052 */
1053ResumeExecution:
1054 /* Safety precaution; looping for too long here can have a very bad effect on the host */
1055 if (++cResume > HWACCM_MAX_RESUME_LOOPS)
1056 {
1057 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
1058 rc = VINF_EM_RAW_INTERRUPT;
1059 goto end;
1060 }
1061
1062 /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
1063 if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
1064 {
1065 Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->eip, EMGetInhibitInterruptsPC(pVM)));
1066 if (pCtx->eip != EMGetInhibitInterruptsPC(pVM))
1067 {
1068 /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
1069 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
1070 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
1071 * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
1072 */
1073 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
1074 /* Irq inhibition is no longer active; clear the corresponding VMX state. */
1075 rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
1076 AssertRC(rc);
1077 }
1078 }
1079 else
1080 {
1081 /* Irq inhibition is no longer active; clear the corresponding VMX state. */
1082 rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
1083 AssertRC(rc);
1084 }
1085
1086 /* Check for pending actions that force us to go back to ring 3. */
1087 if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
1088 {
1089 VM_FF_CLEAR(pVM, VM_FF_TO_R3);
1090 STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
1091 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1092 rc = VINF_EM_RAW_TO_R3;
1093 goto end;
1094 }
1095 /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
1096 if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
1097 {
1098 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1099 rc = VINF_EM_PENDING_REQUEST;
1100 goto end;
1101 }
1102
1103 /* When external interrupts are pending, we should exit the VM when IF is set. */
1104 /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
1105 rc = VMXR0CheckPendingInterrupt(pVM, pCtx);
1106 if (VBOX_FAILURE(rc))
1107 {
1108 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1109 goto end;
1110 }
1111
1112 /** @todo check timers?? */
1113
1114 /* Save the host state first. */
1115 rc = VMXR0SaveHostState(pVM);
1116 if (rc != VINF_SUCCESS)
1117 {
1118 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1119 goto end;
1120 }
1121 /* Load the guest state */
1122 rc = VMXR0LoadGuestState(pVM, pCtx);
1123 if (rc != VINF_SUCCESS)
1124 {
1125 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1126 goto end;
1127 }
1128 fGuestStateSynced = true;
1129
1130 /* Non-register state Guest Context */
1131 /** @todo change me according to cpu state */
1132 rc = VMXWriteVMCS(VMX_VMCS_GUEST_ACTIVITY_STATE, VMX_CMS_GUEST_ACTIVITY_ACTIVE);
1133 AssertRC(rc);
1134
1135 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
1136
1137 /* Manual save and restore:
1138 * - General purpose registers except RIP, RSP
1139 *
1140 * Trashed:
1141 * - CR2 (we don't care)
1142 * - LDTR (reset to 0)
1143 * - DRx (presumably not changed at all)
1144 * - DR7 (reset to 0x400)
1145 * - EFLAGS (reset to RT_BIT(1); not relevant)
1146 *
1147 */
1148
1149 /* All done! Let's start VM execution. */
1150 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
1151 if (pVM->hwaccm.s.vmx.fResumeVM == false)
1152 rc = VMXStartVM(pCtx);
1153 else
1154 rc = VMXResumeVM(pCtx);
1155
1156 /* In case we execute a goto ResumeExecution later on. */
1157 pVM->hwaccm.s.vmx.fResumeVM = true;
1158
1159 /**
1160 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1161 * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
1162 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1163 */
1164
1165 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
1166 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);
1167
1168 switch (rc)
1169 {
1170 case VINF_SUCCESS:
1171 break;
1172
1173 case VERR_VMX_INVALID_VMXON_PTR:
1174 AssertFailed();
1175 goto end;
1176
1177 case VERR_VMX_UNABLE_TO_START_VM:
1178 case VERR_VMX_UNABLE_TO_RESUME_VM:
1179 {
1180#ifdef VBOX_STRICT
1181 int rc1;
1182
1183 rc1 = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
1184 rc1 |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
1185 AssertRC(rc1);
1186 if (rc1 == VINF_SUCCESS)
1187 {
1188 RTGDTR gdtr;
1189 PX86DESCHC pDesc;
1190
1191 ASMGetGDTR(&gdtr);
1192
1193 Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
1194 Log(("Current stack %08x\n", &rc1));
1195
1196
1197 VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
1198 Log(("Old eip %VGv new %VGv\n", pCtx->eip, (RTGCPTR)val));
1199 VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
1200 Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
1201 VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
1202 Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
1203 VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
1204 Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
1205 VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
1206 Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
1207
1208 VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
1209 Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
1210
1211 VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
1212 Log(("VMX_VMCS_HOST_CR3 %VHp\n", val));
1213
1214 VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
1215 Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
1216
1217 VMXReadVMCS(VMX_VMCS_HOST_FIELD_CS, &val);
1218 Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
1219 if (val < gdtr.cbGdt)
1220 {
1221 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1222 HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
1223 }
1224
1225 VMXReadVMCS(VMX_VMCS_HOST_FIELD_DS, &val);
1226 Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
1227 if (val < gdtr.cbGdt)
1228 {
1229 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1230 HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
1231 }
1232
1233 VMXReadVMCS(VMX_VMCS_HOST_FIELD_ES, &val);
1234 Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
1235 if (val < gdtr.cbGdt)
1236 {
1237 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1238 HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
1239 }
1240
1241 VMXReadVMCS(VMX_VMCS_HOST_FIELD_FS, &val);
1242 Log(("VMX_VMCS_HOST_FIELD_FS %08x\n", val));
1243 if (val < gdtr.cbGdt)
1244 {
1245 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1246 HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
1247 }
1248
1249 VMXReadVMCS(VMX_VMCS_HOST_FIELD_GS, &val);
1250 Log(("VMX_VMCS_HOST_FIELD_GS %08x\n", val));
1251 if (val < gdtr.cbGdt)
1252 {
1253 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1254 HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
1255 }
1256
1257 VMXReadVMCS(VMX_VMCS_HOST_FIELD_SS, &val);
1258 Log(("VMX_VMCS_HOST_FIELD_SS %08x\n", val));
1259 if (val < gdtr.cbGdt)
1260 {
1261 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1262 HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
1263 }
1264
1265 VMXReadVMCS(VMX_VMCS_HOST_FIELD_TR, &val);
1266 Log(("VMX_VMCS_HOST_FIELD_TR %08x\n", val));
1267 if (val < gdtr.cbGdt)
1268 {
1269 pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
1270 HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
1271 }
1272
1273 VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
1274 Log(("VMX_VMCS_HOST_TR_BASE %VHv\n", val));
1275
1276 VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
1277 Log(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", val));
1278 VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
1279 Log(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", val));
1280
1281 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_CS, &val);
1282 Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
1283
1284 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
1285 Log(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", val));
1286
1287 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
1288 Log(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", val));
1289
1290 VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
1291 Log(("VMX_VMCS_HOST_RSP %VHv\n", val));
1292 VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
1293 Log(("VMX_VMCS_HOST_RIP %VHv\n", val));
1294
1295#if HC_ARCH_BITS == 64
1296 Log(("MSR_K6_EFER = %VX64\n", ASMRdMsr(MSR_K6_EFER)));
1297 Log(("MSR_K6_STAR = %VX64\n", ASMRdMsr(MSR_K6_STAR)));
1298 Log(("MSR_K8_LSTAR = %VX64\n", ASMRdMsr(MSR_K8_LSTAR)));
1299 Log(("MSR_K8_CSTAR = %VX64\n", ASMRdMsr(MSR_K8_CSTAR)));
1300 Log(("MSR_K8_SF_MASK = %VX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
1301#endif
1302 }
1303#endif /* VBOX_STRICT */
1304 goto end;
1305 }
1306
1307 default:
1308 /* impossible */
1309 AssertFailed();
1310 goto end;
1311 }
1312 /* Success. Query the guest state and figure out what has happened. */
1313
1314 /* Investigate why there was a VM-exit. */
1315 rc = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
1316 STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
1317
1318 exitReason &= 0xffff; /* bits 0-15 contain the exit code. */
1319 rc |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
1320 rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_LENGTH, &cbInstr);
1321 rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_INFO, &val);
1322 intInfo = val;
1323 rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_ERRCODE, &val);
1324 errCode = val; /* might not be valid; depends on VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID. */
1325 rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_INFO, &val);
1326 instrInfo = val;
1327 rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_QUALIFICATION, &val);
1328 exitQualification = val;
1329 AssertRC(rc);
1330
1331 /* Take care of instruction fusing (sti, mov ss) */
1332 rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);
1333 uInterruptState = val;
1334 if (uInterruptState != 0)
1335 {
1336 Assert(uInterruptState <= 2); /* only sti & mov ss */
1337 Log(("uInterruptState %x eip=%VGv\n", uInterruptState, pCtx->eip));
1338 EMSetInhibitInterruptsPC(pVM, pCtx->eip);
1339 }
1340 else
1341 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
1342
1343 /* Let's first sync back eip, esp, and eflags. */
1344 rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
1345 AssertRC(rc);
1346 pCtx->eip = val;
1347 rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
1348 AssertRC(rc);
1349 pCtx->esp = val;
1350 rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
1351 AssertRC(rc);
1352 pCtx->eflags.u32 = val;
1353
1354 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
1355 if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
1356 {
1357 /* Hide our emulation flags */
1358 pCtx->eflags.Bits.u1VM = 0;
1359 pCtx->eflags.Bits.u1IF = pCtx->eflags.Bits.u1VIF;
1360 pCtx->eflags.Bits.u1VIF = 0;
1361 pCtx->eflags.Bits.u2IOPL = 0;
1362 }
1363
1364 /* Control registers. */
1365 VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
1366 VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
1367 val = (valShadow & pVM->hwaccm.s.vmx.cr0_mask) | (val & ~pVM->hwaccm.s.vmx.cr0_mask);
1368 CPUMSetGuestCR0(pVM, val);
1369
1370 VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
1371 VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
1372 val = (valShadow & pVM->hwaccm.s.vmx.cr4_mask) | (val & ~pVM->hwaccm.s.vmx.cr4_mask);
1373 CPUMSetGuestCR4(pVM, val);
1374
1375 CPUMSetGuestCR2(pVM, ASMGetCR2());
1376
1377 VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
1378 CPUMSetGuestDR7(pVM, val);
1379
1380 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
1381 VMX_READ_SELREG(ES, es);
1382 VMX_READ_SELREG(SS, ss);
1383 VMX_READ_SELREG(CS, cs);
1384 VMX_READ_SELREG(DS, ds);
1385 VMX_READ_SELREG(FS, fs);
1386 VMX_READ_SELREG(GS, gs);
1387
1388 /** @note NOW IT'S SAFE FOR LOGGING! */
1389 Log2(("Raw exit reason %08x\n", exitReason));
1390
1391 /* Check if an injected event was interrupted prematurely. */
1392 rc = VMXReadVMCS(VMX_VMCS_RO_IDT_INFO, &val);
1393 AssertRC(rc);
1394 pVM->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
1395 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVM->hwaccm.s.Event.intInfo)
1396 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVM->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
1397 {
1398 Log(("Pending inject %VX64 at %08x exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->eip, exitReason, intInfo, exitQualification));
1399 pVM->hwaccm.s.Event.fPending = true;
1400 /* Error code present? */
1401 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVM->hwaccm.s.Event.intInfo))
1402 {
1403 rc = VMXReadVMCS(VMX_VMCS_RO_IDT_ERRCODE, &val);
1404 AssertRC(rc);
1405 pVM->hwaccm.s.Event.errCode = val;
1406 }
1407 else
1408 pVM->hwaccm.s.Event.errCode = 0;
1409 }
1410
1411#ifdef VBOX_STRICT
1412 if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
1413 HWACCMDumpRegs(pCtx);
1414#endif
1415
1416 Log2(("E%d", exitReason));
1417 Log2(("Exit reason %d, exitQualification %08x\n", exitReason, exitQualification));
1418 Log2(("instrInfo=%d instrError=%d instr length=%d\n", instrInfo, instrError, cbInstr));
1419 Log2(("Interruption error code %d\n", errCode));
1420 Log2(("IntInfo = %08x\n", intInfo));
1421 Log2(("New EIP=%VGv\n", pCtx->eip));
1422
1423 /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
1424 switch (exitReason)
1425 {
1426 case VMX_EXIT_EXCEPTION: /* 0 Exception or non-maskable interrupt (NMI). */
1427 case VMX_EXIT_EXTERNAL_IRQ: /* 1 External interrupt. */
1428 {
1429 uint32_t vector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
1430
1431 if (!VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
1432 {
1433 Assert(exitReason == VMX_EXIT_EXTERNAL_IRQ);
1434 /* External interrupt; leave to allow it to be dispatched again. */
1435 rc = VINF_EM_RAW_INTERRUPT;
1436 break;
1437 }
1438 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
1439 {
1440 case VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI: /* Non-maskable interrupt. */
1441 /* External interrupt; leave to allow it to be dispatched again. */
1442 rc = VINF_EM_RAW_INTERRUPT;
1443 break;
1444
1445 case VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT: /* External hardware interrupt. */
1446 AssertFailed(); /* can't come here; fails the first check. */
1447 break;
1448
1449 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT: /* Software exception. (#BP or #OF) */
1450 Assert(vector == 3 || vector == 4);
1451 /* no break */
1452 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT: /* Hardware exception. */
1453 Log2(("Hardware/software interrupt %d\n", vector));
1454 switch (vector)
1455 {
1456 case X86_XCPT_NM:
1457 {
1458 uint32_t oldCR0;
1459
1460 Log(("#NM fault at %VGv error code %x\n", pCtx->eip, errCode));
1461
1462 /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
1463 oldCR0 = ASMGetCR0();
1464 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
1465 rc = CPUMHandleLazyFPU(pVM);
1466 if (rc == VINF_SUCCESS)
1467 {
1468 Assert(CPUMIsGuestFPUStateActive(pVM));
1469
1470 /* CPUMHandleLazyFPU could have changed CR0; restore it. */
1471 ASMSetCR0(oldCR0);
1472
1473 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);
1474
1475 /* Continue execution. */
1476 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1477 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1478
1479 goto ResumeExecution;
1480 }
1481
1482 Log(("Forward #NM fault to the guest\n"));
1483 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
1484 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
1485 AssertRC(rc);
1486 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1487 goto ResumeExecution;
1488 }
1489
1490 case X86_XCPT_PF: /* Page fault */
1491 {
1492 Log2(("Page fault at %VGv error code %x\n", exitQualification ,errCode));
1493 /* Exit qualification contains the linear address of the page fault. */
1494 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
1495 TRPMSetErrorCode(pVM, errCode);
1496 TRPMSetFaultAddress(pVM, exitQualification);
1497
1498 /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
1499 rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
1500 Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->eip, rc));
1501 if (rc == VINF_SUCCESS)
1502 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1503 Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, exitQualification ,errCode));
1504 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
1505
1506 TRPMResetTrap(pVM);
1507
1508 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1509 goto ResumeExecution;
1510 }
1511 else
1512 if (rc == VINF_EM_RAW_GUEST_TRAP)
1513 { /* A genuine pagefault.
1514 * Forward the trap to the guest by injecting the exception and resuming execution.
1515 */
1516 Log2(("Forward page fault to the guest\n"));
1517 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
1518 /* The error code might have been changed. */
1519 errCode = TRPMGetErrorCode(pVM);
1520
1521 TRPMResetTrap(pVM);
1522
1523 /* Now we must update CR2. */
1524 pCtx->cr2 = exitQualification;
1525 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1526 AssertRC(rc);
1527
1528 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1529 goto ResumeExecution;
1530 }
1531#ifdef VBOX_STRICT
1532 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1533 Log2(("PGMTrap0eHandler failed with %d\n", rc));
1534#endif
1535 /* Need to go back to the recompiler to emulate the instruction. */
1536 TRPMResetTrap(pVM);
1537 break;
1538 }
1539
1540 case X86_XCPT_MF: /* Floating point exception. */
1541 {
1542 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
1543 if (!(pCtx->cr0 & X86_CR0_NE))
1544 {
1545 /* old style FPU error reporting needs some extra work. */
1546 /** @todo don't fall back to the recompiler, but do it manually. */
1547 rc = VINF_EM_RAW_EMULATE_INSTR;
1548 break;
1549 }
1550 Log(("Trap %x at %VGv\n", vector, pCtx->eip));
1551 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1552 AssertRC(rc);
1553
1554 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1555 goto ResumeExecution;
1556 }
1557
1558#ifdef VBOX_STRICT
1559 case X86_XCPT_GP: /* General protection fault exception.*/
1560 case X86_XCPT_UD: /* Invalid opcode exception. */
1561 case X86_XCPT_DE: /* Divide error exception. */
1562 case X86_XCPT_SS: /* Stack segment exception. */
1563 case X86_XCPT_NP: /* Segment not present exception. */
1564 {
1565 switch(vector)
1566 {
1567 case X86_XCPT_DE:
1568 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
1569 break;
1570 case X86_XCPT_UD:
1571 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
1572 break;
1573 case X86_XCPT_SS:
1574 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
1575 break;
1576 case X86_XCPT_NP:
1577 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
1578 break;
1579 case X86_XCPT_GP:
1580 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
1581 break;
1582 }
1583
1584 Log(("Trap %x at %VGv\n", vector, pCtx->eip));
1585 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1586 AssertRC(rc);
1587
1588 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1589 goto ResumeExecution;
1590 }
1591#endif
1592 default:
1593 AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
1594 rc = VERR_EM_INTERNAL_ERROR;
1595 break;
1596 } /* switch (vector) */
1597
1598 break;
1599
1600 default:
1601 rc = VERR_EM_INTERNAL_ERROR;
1602 AssertFailed();
1603 break;
1604 }
1605
1606 break;
1607 }
1608
1609 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */
1610         /* The guest can now accept interrupts (IF=1), so stop requesting interrupt-window VM-exits. */
1611 Log2(("VMX_EXIT_IRQ_WINDOW %VGv\n", pCtx->eip));
1612 pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
1613 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
1614 AssertRC(rc);
1615 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIrqWindow);
1616 goto ResumeExecution; /* we check for pending guest interrupts there */
1617
1618 case VMX_EXIT_INVD: /* 13 Guest software attempted to execute INVD. */
1619 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
1620 /* Skip instruction and continue directly. */
1621 pCtx->eip += cbInstr;
1622         /* Continue execution. */
1623 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1624 goto ResumeExecution;
1625
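         /* For the unconditional instruction exits below, cbInstr holds the VM-exit
          * instruction length read from the VMCS, so EIP can simply be advanced past
          * the instruction once it has been emulated. */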
1626 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
1627 {
1628 Log2(("VMX: Cpuid %x\n", pCtx->eax));
1629 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
1630 rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
1631 if (rc == VINF_SUCCESS)
1632 {
1633 /* Update EIP and continue execution. */
1634 Assert(cbInstr == 2);
1635 pCtx->eip += cbInstr;
1636 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1637 goto ResumeExecution;
1638 }
1639 AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
1640 rc = VINF_EM_RAW_EMULATE_INSTR;
1641 break;
1642 }
1643
1644 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
1645 {
1646 Log2(("VMX: Rdtsc\n"));
1647 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
1648 rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
1649 if (rc == VINF_SUCCESS)
1650 {
1651 /* Update EIP and continue execution. */
1652 Assert(cbInstr == 2);
1653 pCtx->eip += cbInstr;
1654 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1655 goto ResumeExecution;
1656 }
1657 AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
1658 rc = VINF_EM_RAW_EMULATE_INSTR;
1659 break;
1660 }
1661
1662     case VMX_EXIT_INVPG: /* 14 Guest software attempted to execute INVLPG. */
1663 {
1664 Log2(("VMX: invlpg\n"));
1665 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
1666 rc = EMInterpretInvlpg(pVM, CPUMCTX2CORE(pCtx), exitQualification);
1667 if (rc == VINF_SUCCESS)
1668 {
1669 /* Update EIP and continue execution. */
1670 pCtx->eip += cbInstr;
1671 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1672 goto ResumeExecution;
1673 }
1674 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: invlpg %VGv failed with %Vrc\n", exitQualification, rc));
1675 break;
1676 }
1677
1678 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
1679 {
1680 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
1681 {
1682 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
1683 Log2(("VMX: %VGv mov cr%d, x\n", pCtx->eip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
1684 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
1685 rc = EMInterpretCRxWrite(pVM, CPUMCTX2CORE(pCtx),
1686 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
1687 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
1688
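                 /* Mark the written control register dirty so the VMCS and the shadow
                  * paging state get refreshed on the next VM-entry; CR2 has no VMCS
                  * guest-state field, so a guest write to it needs no follow-up here. */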
1689 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
1690 {
1691 case 0:
1692 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1693 break;
1694 case 2:
1695 break;
1696 case 3:
1697 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
1698 break;
1699 case 4:
1700 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
1701 break;
1702 default:
1703 AssertFailed();
1704 }
1705 /* Check if a sync operation is pending. */
1706 if ( rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
1707 && VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
1708 {
1709 rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
1710 AssertRC(rc);
1711 }
1712 break;
1713
1714 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
1715 Log2(("VMX: mov x, crx\n"));
1716 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
1717 rc = EMInterpretCRxRead(pVM, CPUMCTX2CORE(pCtx),
1718 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
1719 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
1720 break;
1721
1722 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
1723 Log2(("VMX: clts\n"));
1724 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCLTS);
1725 rc = EMInterpretCLTS(pVM);
1726 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1727 break;
1728
1729 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
1730 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
1731 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
1732 rc = EMInterpretLMSW(pVM, VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
1733 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1734 break;
1735 }
1736
1737 /* Update EIP if no error occurred. */
1738 if (VBOX_SUCCESS(rc))
1739 pCtx->eip += cbInstr;
1740
1741 if (rc == VINF_SUCCESS)
1742 {
1743 /* Only resume if successful. */
1744 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1745 goto ResumeExecution;
1746 }
1747 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1748 break;
1749 }
1750
1751 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
1752 {
1753 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first time and restore drx registers afterwards */
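         /* Every MOV DRx traps here because the MOV-DR exiting control is set; the
          * exit qualification encodes the direction, the debug register and the
          * general purpose register involved. */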
1754 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
1755 {
1756 Log2(("VMX: mov drx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
1757 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxWrite);
1758 rc = EMInterpretDRxWrite(pVM, CPUMCTX2CORE(pCtx),
1759 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
1760 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
1761 Log2(("DR7=%08x\n", pCtx->dr7));
1762 }
1763 else
1764 {
1765 Log2(("VMX: mov x, drx\n"));
1766 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1767 rc = EMInterpretDRxRead(pVM, CPUMCTX2CORE(pCtx),
1768 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
1769 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification));
1770 }
1771 /* Update EIP if no error occurred. */
1772 if (VBOX_SUCCESS(rc))
1773 pCtx->eip += cbInstr;
1774
1775 if (rc == VINF_SUCCESS)
1776 {
1777 /* Only resume if successful. */
1778 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1779 goto ResumeExecution;
1780 }
1781 Assert(rc == VERR_EM_INTERPRETER);
1782 break;
1783 }
1784
1785     /** @note We'll get a #GP if the I/O instruction isn't allowed (IOPL or TSS bitmap); no need to double-check. */
1786 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
1787 {
1788 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
1789 uint32_t uPort;
1790 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
1791
1792 /** @todo necessary to make the distinction? */
1793 if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX)
1794 {
1795 uPort = pCtx->edx & 0xffff;
1796 }
1797 else
1798 uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification); /* Immediate encoding. */
1799
1800         /* Paranoia: reject the invalid width encodings (2 and anything past the table) and let the ring-3 I/O handler sort it out. */
1801 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4))
1802 {
1803 rc = fIOWrite ? VINF_IOM_HC_IOPORT_WRITE : VINF_IOM_HC_IOPORT_READ;
1804 break;
1805 }
1806
1807 uint32_t cbSize = aIOSize[uIOWidth];
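         /* The width field encodes 0 = byte, 1 = word and 3 = dword; 2 and anything
          * larger were rejected above. aIOSize maps the encoding to a byte count and
          * aIOOpAnd to the matching EAX mask. */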
1808
1809 if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification))
1810 {
1811 /* ins/outs */
1812 uint32_t prefix = 0;
1813 if (VMX_EXIT_QUALIFICATION_IO_REP(exitQualification))
1814 prefix |= PREFIX_REP;
1815
1816 if (fIOWrite)
1817 {
1818 Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->eip, uPort, cbSize));
1819 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
1820 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1821 }
1822 else
1823 {
1824 Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->eip, uPort, cbSize));
1825 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
1826 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1827 }
1828 }
1829 else
1830 {
1831 /* normal in/out */
1832 uint32_t uAndVal = aIOOpAnd[uIOWidth];
1833
1834 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(exitQualification));
1835
1836 if (fIOWrite)
1837 {
1838 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
1839 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
1840 }
1841 else
1842 {
1843 uint32_t u32Val = 0;
1844
1845 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
1846 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
1847 if (IOM_SUCCESS(rc))
1848 {
1849                     /* Merge the bytes read into EAX, preserving the bits above the access width. */
1850 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1851 }
1852 }
1853 }
1854 /*
1855          * Handle the I/O return codes.
1856 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1857 */
1858 if (IOM_SUCCESS(rc))
1859 {
1860 /* Update EIP and continue execution. */
1861 pCtx->eip += cbInstr;
1862 if (RT_LIKELY(rc == VINF_SUCCESS))
1863 {
1864 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1865 goto ResumeExecution;
1866 }
1867 break;
1868 }
1869
1870#ifdef VBOX_STRICT
1871 if (rc == VINF_IOM_HC_IOPORT_READ)
1872 Assert(!fIOWrite);
1873 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
1874 Assert(fIOWrite);
1875 else
1876 AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
1877#endif
1878 break;
1879 }
1880
1881 default:
1882 /* The rest is handled after syncing the entire CPU state. */
1883 break;
1884 }
1885
1886 /* Note: the guest state isn't entirely synced back at this stage. */
1887
1888 /* Investigate why there was a VM-exit. (part 2) */
1889 switch (exitReason)
1890 {
1891 case VMX_EXIT_EXCEPTION: /* 0 Exception or non-maskable interrupt (NMI). */
1892 case VMX_EXIT_EXTERNAL_IRQ: /* 1 External interrupt. */
1893 /* Already handled above. */
1894 break;
1895
1896 case VMX_EXIT_TRIPLE_FAULT: /* 2 Triple fault. */
1897 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
1898 break;
1899
1900 case VMX_EXIT_INIT_SIGNAL: /* 3 INIT signal. */
1901 case VMX_EXIT_SIPI: /* 4 Start-up IPI (SIPI). */
1902 rc = VINF_EM_RAW_INTERRUPT;
1903 AssertFailed(); /* Can't happen. Yet. */
1904 break;
1905
1906 case VMX_EXIT_IO_SMI_IRQ: /* 5 I/O system-management interrupt (SMI). */
1907 case VMX_EXIT_SMI_IRQ: /* 6 Other SMI. */
1908 rc = VINF_EM_RAW_INTERRUPT;
1909 AssertFailed(); /* Can't happen afaik. */
1910 break;
1911
1912 case VMX_EXIT_TASK_SWITCH: /* 9 Task switch. */
1913 rc = VERR_EM_INTERPRETER;
1914 break;
1915
1916 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */
1917         /* Check whether external interrupts are pending; if so, skip the HLT and resume guest execution. */
1918 if (VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
1919 {
1920             pCtx->eip++; /* Skip the one-byte HLT opcode. */
1921 goto ResumeExecution;
1922 }
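         /* Nothing pending: return to ring-3 so EM can halt the VCPU until an
          * interrupt arrives instead of spinning here. */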
1923
1924 rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
1925 break;
1926
1927 case VMX_EXIT_RSM: /* 17 Guest software attempted to execute RSM in SMM. */
1928 AssertFailed(); /* can't happen. */
1929 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1930 break;
1931
1932 case VMX_EXIT_VMCALL: /* 18 Guest software executed VMCALL. */
1933 case VMX_EXIT_VMCLEAR: /* 19 Guest software executed VMCLEAR. */
1934 case VMX_EXIT_VMLAUNCH: /* 20 Guest software executed VMLAUNCH. */
1935 case VMX_EXIT_VMPTRLD: /* 21 Guest software executed VMPTRLD. */
1936 case VMX_EXIT_VMPTRST: /* 22 Guest software executed VMPTRST. */
1937 case VMX_EXIT_VMREAD: /* 23 Guest software executed VMREAD. */
1938 case VMX_EXIT_VMRESUME: /* 24 Guest software executed VMRESUME. */
1939 case VMX_EXIT_VMWRITE: /* 25 Guest software executed VMWRITE. */
1940 case VMX_EXIT_VMXOFF: /* 26 Guest software executed VMXOFF. */
1941 case VMX_EXIT_VMXON: /* 27 Guest software executed VMXON. */
1942 /** @todo inject #UD immediately */
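         /* Nested VMX is not exposed to the guest, so architecturally these
          * instructions would raise #UD; until that is injected directly they are
          * punted to ring-3 as privileged exceptions. */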
1943 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1944 break;
1945
1946 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
1947 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
1948     case VMX_EXIT_INVPG: /* 14 Guest software attempted to execute INVLPG. */
1949 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
1950 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
1951 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
1952 /* already handled above */
1953 AssertMsg( rc == VINF_PGM_CHANGE_MODE
1954 || rc == VINF_EM_RAW_INTERRUPT
1955 || rc == VERR_EM_INTERPRETER
1956 || rc == VINF_EM_RAW_EMULATE_INSTR
1957 || rc == VINF_PGM_SYNC_CR3
1958 || rc == VINF_IOM_HC_IOPORT_READ
1959 || rc == VINF_IOM_HC_IOPORT_WRITE
1960 || rc == VINF_EM_RAW_GUEST_TRAP
1961 || rc == VINF_TRPM_XCPT_DISPATCHED
1962 || rc == VINF_EM_RESCHEDULE_REM,
1963 ("rc = %d\n", rc));
1964 break;
1965
1966 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */
1967 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
1968 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
1969 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */
1970 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */
1971 case VMX_EXIT_PAUSE: /* 40 Guest software attempted to execute PAUSE. */
1972 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1973 break;
1974
1975 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */
1976 Assert(rc == VINF_EM_RAW_INTERRUPT);
1977 break;
1978
1979 case VMX_EXIT_TPR: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
1980 case VMX_EXIT_ERR_INVALID_GUEST_STATE: /* 33 VM-entry failure due to invalid guest state. */
1981 case VMX_EXIT_ERR_MSR_LOAD: /* 34 VM-entry failure due to MSR loading. */
1982 case VMX_EXIT_ERR_MACHINE_CHECK: /* 41 VM-entry failure due to machine-check. */
1983 default:
1984 rc = VERR_EM_INTERNAL_ERROR;
1985 AssertMsgFailed(("Unexpected exit code %d\n", exitReason)); /* Can't happen. */
1986 break;
1987
1988 }
1989end:
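    /* Reading the seldom-used remainder of the guest context back from the VMCS is
     * only worthwhile when the rest of the guest state was synced for this exit. */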
1990 if (fGuestStateSynced)
1991 {
1992 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
1993 VMX_READ_SELREG(LDTR, ldtr);
1994 VMX_READ_SELREG(TR, tr);
1995
1996 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, &val);
1997 pCtx->gdtr.cbGdt = val;
1998 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
1999 pCtx->gdtr.pGdt = val;
2000
2001 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, &val);
2002 pCtx->idtr.cbIdt = val;
2003 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
2004 pCtx->idtr.pIdt = val;
2005
2006 /*
2007 * System MSRs
2008 */
2009 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_CS, &val);
2010 pCtx->SysEnter.cs = val;
2011 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
2012 pCtx->SysEnter.eip = val;
2013 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
2014 pCtx->SysEnter.esp = val;
2015 }
2016
2017 /* Signal changes for the recompiler. */
2018 CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
2019
2020     /* If we executed VMLAUNCH/VMRESUME and an external IRQ was pending, then we don't have to do a full sync the next time. */
2021 if ( exitReason == VMX_EXIT_EXTERNAL_IRQ
2022 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
2023 {
2024 STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
2025 /* On the next entry we'll only sync the host context. */
2026 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
2027 }
2028 else
2029 {
2030 /* On the next entry we'll sync everything. */
2031 /** @todo we can do better than this */
2032 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
2033 }
2034
2035 /* translate into a less severe return code */
2036 if (rc == VERR_EM_INTERPRETER)
2037 rc = VINF_EM_RAW_EMULATE_INSTR;
2038
2039 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
2040 Log2(("X"));
2041 return rc;
2042}
2043
2044
2045/**
2046 * Enters the VT-x session
2047 *
2048 * @returns VBox status code.
2049 * @param pVM The VM to operate on.
2050 */
2051HWACCMR0DECL(int) VMXR0Enter(PVM pVM)
2052{
2053 Assert(pVM->hwaccm.s.vmx.fSupported);
2054
2055 unsigned cr4 = ASMGetCR4();
2056 if (!(cr4 & X86_CR4_VMXE))
2057 {
2058 AssertMsgFailed(("X86_CR4_VMXE should be set!\n"));
2059 return VERR_VMX_X86_CR4_VMXE_CLEARED;
2060 }
2061
2062 /* Activate the VM Control Structure. */
2063 int rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
2064 if (VBOX_FAILURE(rc))
2065 return rc;
2066
2067 pVM->hwaccm.s.vmx.fResumeVM = false;
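    /* The VMCS was cleared when the session was last left (or at setup), so its
     * launch state is 'clear' and the next entry must use VMLAUNCH rather than
     * VMRESUME; fResumeVM = false selects exactly that. */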
2068 return VINF_SUCCESS;
2069}
2070
2071
2072/**
2073 * Leaves the VT-x session
2074 *
2075 * @returns VBox status code.
2076 * @param pVM The VM to operate on.
2077 */
2078HWACCMR0DECL(int) VMXR0Leave(PVM pVM)
2079{
2080 Assert(pVM->hwaccm.s.vmx.fSupported);
2081
2082     /* Clear the VMCS: this marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
2083 int rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
2084 AssertRC(rc);
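    /* After VMCLEAR the launch state is 'clear' and all VMCS data is back in memory,
     * so the VMCS can safely be made active on another CPU later. */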
2085
2086 return VINF_SUCCESS;
2087}
2088