VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCMInternal.h@ 30105

Last change on this file since 30105 was 30105, checked in by vboxsync, 14 years ago

More stats

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.7 KB
Line 
1/* $Id: HWACCMInternal.h 30105 2010-06-09 11:03:37Z vboxsync $ */
2/** @file
3 * HWACCM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HWACCMInternal_h
19#define ___HWACCMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/em.h>
24#include <VBox/stam.h>
25#include <VBox/dis.h>
26#include <VBox/hwaccm.h>
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <iprt/memobj.h>
30#include <iprt/cpuset.h>
31#include <iprt/mp.h>
32#include <iprt/avl.h>
33
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64 bits guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

/** Use the VMCS read/write cache (see VMCSCACHE below) instead of direct accesses. */
#define VMX_USE_CACHED_VMCS_ACCESSES
/** Enable real-mode emulation under VT-x (uses the pRealModeTSS trick below). */
#define HWACCM_VMX_EMULATE_REALMODE
/** Build with VT-x EPT (nested paging) support. */
#define HWACCM_VTX_WITH_EPT
/** Build with VT-x VPID (tagged TLB) support. */
#define HWACCM_VTX_WITH_VPID


#if 0
/* Seeing somewhat random behaviour on my Nehalem system with auto-save of guest MSRs;
 * for some strange reason the CPU doesn't save the MSRs during the VM-exit.
 * Clearly visible with a dual VCPU configured OpenSolaris 200906 live cd VM.
 *
 * Note: change the assembly files when enabling this! (remove the manual auto load/save)
 */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
#endif
54
55RT_C_DECLS_BEGIN
56
57
58/** @defgroup grp_hwaccm_int Internal
59 * @ingroup grp_hwaccm
60 * @internal
61 * @{
62 */
63
64
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask used to wrap exit-reason indices into the statistics array (MAX_EXITREASON_STAT - 1). */
#define MASK_EXITREASON_STAT       0xff
/** Mask used to wrap injected IRQ/exception vectors into the statistics array. */
#define MASK_INJECT_IRQ_STAT       0xff
69
/** @name Changed flags
 * These flags are used to keep track of which important registers
 * have been changed since they were last reset.
 * @{
 */
#define HWACCM_CHANGED_GUEST_FPU             RT_BIT(0)
#define HWACCM_CHANGED_GUEST_CR0             RT_BIT(1)
#define HWACCM_CHANGED_GUEST_CR3             RT_BIT(2)
#define HWACCM_CHANGED_GUEST_CR4             RT_BIT(3)
#define HWACCM_CHANGED_GUEST_GDTR            RT_BIT(4)
#define HWACCM_CHANGED_GUEST_IDTR            RT_BIT(5)
#define HWACCM_CHANGED_GUEST_LDTR            RT_BIT(6)
#define HWACCM_CHANGED_GUEST_TR              RT_BIT(7)
#define HWACCM_CHANGED_GUEST_SYSENTER_MSR    RT_BIT(8)
#define HWACCM_CHANGED_GUEST_SEGMENT_REGS    RT_BIT(9)
#define HWACCM_CHANGED_GUEST_DEBUG           RT_BIT(10)
#define HWACCM_CHANGED_HOST_CONTEXT          RT_BIT(11)

/** All guest-register changed flags. */
#define HWACCM_CHANGED_ALL_GUEST      (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                       | HWACCM_CHANGED_GUEST_CR0 \
                                       | HWACCM_CHANGED_GUEST_CR3 \
                                       | HWACCM_CHANGED_GUEST_CR4 \
                                       | HWACCM_CHANGED_GUEST_GDTR \
                                       | HWACCM_CHANGED_GUEST_IDTR \
                                       | HWACCM_CHANGED_GUEST_LDTR \
                                       | HWACCM_CHANGED_GUEST_TR \
                                       | HWACCM_CHANGED_GUEST_SYSENTER_MSR \
                                       | HWACCM_CHANGED_GUEST_DEBUG \
                                       | HWACCM_CHANGED_GUEST_FPU)

/** All changed flags: every guest-register flag plus the host context.
 * Derived from HWACCM_CHANGED_ALL_GUEST so the two lists cannot drift apart. */
#define HWACCM_CHANGED_ALL            (HWACCM_CHANGED_ALL_GUEST | HWACCM_CHANGED_HOST_CONTEXT)

/** @} */
114
/** @name Intercepted traps
 * Traps that need to be intercepted so we can correctly dispatch them to the
 * guest if required.  Currently \#NM and \#PF only.
 * @{
 */
#ifdef VBOX_STRICT
/* Note: fully parenthesized so the masks combine safely with &, ~, etc. */
# define HWACCM_VMX_TRAP_MASK           (  RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_DE) \
                                         | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF) | RT_BIT(X86_XCPT_UD) \
                                         | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) \
                                         | RT_BIT(X86_XCPT_MF))
# define HWACCM_SVM_TRAP_MASK           HWACCM_VMX_TRAP_MASK
#else
# define HWACCM_VMX_TRAP_MASK           (RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF))
# define HWACCM_SVM_TRAP_MASK           (RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF))
#endif
/* All exceptions have to be intercepted in emulated real-mode (minus \#NM & \#PF
 * as they are always intercepted). */
#define HWACCM_VMX_TRAP_MASK_REALMODE   (  RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
                                         | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
                                         | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_DF) | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) \
                                         | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) \
                                         | RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) \
                                         | RT_BIT(X86_XCPT_MC) | RT_BIT(X86_XCPT_XF))
/** @} */
129
130
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HWACCM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HWACCM_VTX_TOTAL_DEVHEAP_MEM        (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)

/* Enable for TPR guest patching. */
#define VBOX_HWACCM_WITH_GUEST_PATCHING

/** @name HWACCM saved-state (SSM) version numbers.
 * @{ */
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
#define HWACCM_SSM_VERSION                  5
#define HWACCM_SSM_VERSION_NO_PATCHING      4
#else
#define HWACCM_SSM_VERSION                  4
#define HWACCM_SSM_VERSION_NO_PATCHING      4
#endif
/** Saved-state version used by the 2.0.x release series. */
#define HWACCM_SSM_VERSION_2_0_X            3
/** @} */
154
/**
 * Per-cpu information. (host)
 */
typedef struct
{
    /** The CPU id this entry describes. */
    RTCPUID         idCpu;

    /** Ring-0 memory object for this CPU.
     * NOTE(review): presumably backs the host VMXON/host-save page for this
     * CPU — confirm against HWACCMR0. */
    RTR0MEMOBJ      pMemObj;
    /* Current ASID (AMD-V)/VPID (Intel) */
    uint32_t        uCurrentASID;
    /* TLB flush count */
    uint32_t        cTLBFlushes;

    /* Set the first time a cpu is used to make sure we start with a clean TLB. */
    bool            fFlushTLB;

    /** Configured for VT-x or AMD-V. */
    bool            fConfigured;

    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool            fIgnoreAMDVInUseError;

    /** In use by our code. (for power suspend) */
    volatile bool   fInUse;
} HWACCM_CPUINFO;
/** Pointer to per-cpu (host) information. */
typedef HWACCM_CPUINFO *PHWACCM_CPUINFO;
179
/**
 * Type of a pending guest I/O operation (see HWACCMCPU::PendingIO).
 */
typedef enum
{
    /** No/invalid pending I/O. */
    HWACCMPENDINGIO_INVALID = 0,
    /** Pending I/O-port read. */
    HWACCMPENDINGIO_PORT_READ,
    /** Pending I/O-port write. */
    HWACCMPENDINGIO_PORT_WRITE,
    /** Pending string I/O-port read. */
    HWACCMPENDINGIO_STRING_READ,
    /** Pending string I/O-port write. */
    HWACCMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HWACCMPENDINGIO_32BIT_HACK = 0x7fffffff
} HWACCMPENDINGIO;
190
191
/**
 * Type of the instruction replaced by a TPR patch (see HWACCMTPRPATCH).
 */
typedef enum
{
    /** Invalid type. */
    HWACCMTPRINSTR_INVALID,
    /** TPR read. */
    HWACCMTPRINSTR_READ,
    /** TPR read followed by a shift-right-4. */
    HWACCMTPRINSTR_READ_SHR4,
    /** TPR write from a register. */
    HWACCMTPRINSTR_WRITE_REG,
    /** TPR write of an immediate value. */
    HWACCMTPRINSTR_WRITE_IMM,
    /** Instruction replaced by a jump (see HWACCMTPRPATCH::pJumpTarget). */
    HWACCMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HWACCMTPRINSTR_32BIT_HACK = 0x7fffffff
} HWACCMTPRINSTR;
203
/**
 * Record of one guest TPR instruction patch (kept in HWACCM::PatchTree).
 */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HWACCMTPRINSTR      enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HWACCMTPRPATCH;
/** Pointer to HWACCMTPRPATCH. */
typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
229
/**
 * Switcher function, HC to RC.
 *
 * @param   pVM     The VM handle.
 * @returns Return code indicating the action to take.
 */
typedef DECLASMTYPE(int) FNHWACCMSWITCHERHC(PVM pVM);
/** Pointer to a switcher function. */
typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
239
/**
 * HWACCM VM Instance data.
 * Changes to this must be checked against the padding of the cfgm union in VM!
 */
typedef struct HWACCM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit structure padding. */
    bool                        u8Alignment[6];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    RTUINT                      uMaxASID;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    /** Explicit structure padding. */
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;

    /* AMD-V 64 bits vmrun handler */
    RTRCPTR                     pfnSVMGCVMRun64;

    /* VT-x 64 bits vmlaunch handler */
    RTRCPTR                     pfnVMXGCStartVM64;

    /* RC handler to setup the 64 bits FPU state. */
    RTRCPTR                     pfnSaveGuestFPU64;

    /* RC handler to setup the 64 bits debug state. */
    RTRCPTR                     pfnSaveGuestDebug64;

    /* Test handler */
    RTRCPTR                     pfnTest64;

    /** Explicit structure padding. */
    RTRCPTR                     uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t                    u32Alignment[1]; */
#endif

    /** VT-x specific data. */
    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVPID;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVPID;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;
        /** Explicit structure padding. */
        bool                        uAlignment[3];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC physical page (serves for filtering accesses). */
        RTR0MEMOBJ                  pMemObjAPIC;
        /** Physical address of the APIC physical page (serves for filtering accesses). */
        RTHCPHYS                    pAPICPhys;
        /** Virtual address of the APIC physical page (serves for filtering accesses). */
        R0PTRTYPE(uint8_t *)        pAPIC;

        /** R0 memory object for the MSR entry load page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSREntryLoad;
        /** Physical address of the MSR entry load page (guest MSRs). */
        RTHCPHYS                    pMSREntryLoadPhys;
        /** Virtual address of the MSR entry load page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSREntryLoad;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /** Scratch page for crash-dump debugging (see VMCSCACHE magic markers). */
        RTR0MEMOBJ                  pMemObjScratch;
        RTHCPHYS                    pScratchPhys;
        R0PTRTYPE(uint8_t *)        pScratch;
#endif
        /** R0 memory object for the MSR exit store page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitStore;
        /** Physical address of the MSR exit store page (guest MSRs). */
        RTHCPHYS                    pMSRExitStorePhys;
        /** Virtual address of the MSR exit store page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitStore;

        /** R0 memory object for the MSR exit load page (host MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitLoad;
        /** Physical address of the MSR exit load page (host MSRs). */
        RTHCPHYS                    pMSRExitLoadPhys;
        /** Virtual address of the MSR exit load page (host MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitLoad;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /** Padding to keep the following 64-bit members naturally aligned. */
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    hostEFER;

        /** VMX MSR values */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_eptcaps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH                   enmFlushPage;
        VMX_FLUSH                   enmFlushContext;
    } vmx;

    /** AMD-V specific data. */
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  pMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    pIOBitmapPhys;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pIOBitmap;

        /* HWCR msr (for diagnostics) */
        uint64_t                    msrHWCR;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                 PatchTree;
    /** Number of entries used in aPatches. */
    uint32_t                    cPatches;
    /** Storage for the TPR patch records referenced by PatchTree. */
    HWACCMTPRPATCH              aPatches[64];

    /** Cached AMD feature bits from CPUID. */
    struct
    {
        uint32_t                u32AMDFeatureECX;
        uint32_t                u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** HWACCMR0Init was run */
    bool                        fHWACCMR0Init;
    /** Explicit structure padding. */
    bool                        u8Alignment1[7];

    /** TPR patching/replacement statistics. */
    STAMCOUNTER                 StatTPRPatchSuccess;
    STAMCOUNTER                 StatTPRPatchFailure;
    STAMCOUNTER                 StatTPRReplaceSuccess;
    STAMCOUNTER                 StatTPRReplaceFailure;
} HWACCM;
/** Pointer to HWACCM VM instance data. */
typedef HWACCM *PHWACCM;
467
/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                 128

/**
 * Structure for storing read and write VMCS actions.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    /** Cache of VMCS field writes. */
    struct
    {
        /** Number of valid entries in aField/aFieldVal. */
        uint32_t    cValidEntries;
        /** Explicit structure padding. */
        uint32_t    uAlignment;
        /** VMCS field indices. */
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        /** Values corresponding to the aField entries. */
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cache of VMCS field reads. */
    struct
    {
        /** Number of valid entries in aField/aFieldVal. */
        uint32_t    cValidEntries;
        /** Explicit structure padding. */
        uint32_t    uAlignment;
        /** VMCS field indices. */
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        /** Values corresponding to the aField entries. */
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    /** Debug-build aids for tracking world-switch inputs/outputs. */
    struct
    {
        RTHCPHYS    pPageCpuPhys;
        RTHCPHYS    pVMCSPhys;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    pVMCSPhys;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
529
/** VMX StartVM function.
 * @param   fResume     Whether to resume (VMRESUME) or launch (VMLAUNCH) — inferred from usage; confirm in VMXR0.
 * @param   pCtx        Guest CPU context.
 * @param   pCache      VMCS read/write cache.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The virtual CPU handle. */
typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;

/** SVM VMRun function.
 * @param   pVMCBHostPhys   Physical address of the host VMCB.
 * @param   pVMCBPhys       Physical address of the guest VMCB.
 * @param   pCtx            Guest CPU context.
 * @param   pVM             The VM handle.
 * @param   pVCpu           The virtual CPU handle. */
typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
539
/**
 * HWACCM VMCPU Instance data.
 */
typedef struct HWACCMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization) */
    bool                        fFPUOldStyleOverride;

    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;

    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;

    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;

    /** Set when the TLB has been checked until we return from the world switch. */
    volatile uint8_t            fCheckedTLBFlush;
    /** Explicit structure padding. */
    uint8_t                     bAlignment[3];

    /** HWACCM_CHANGED_* flags. */
    RTUINT                      fContextUseFlags;

    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    RTCPUID                     idLastCpu;

    /** TLB flush count */
    RTUINT                      cTLBFlushes;

    /** Current ASID in use by the VM */
    RTUINT                      uCurrentASID;

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExit;
    /** Explicit structure padding. */
    uint32_t                    u32Alignment;

    /** VT-x per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    pVMCSPhys;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  pMemObjVMCS;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pVMCS;

        /** Ring 0 handlers for VT-x. */
        PFNHWACCMVMXSTARTVM         pfnStartVM;

#if HC_ARCH_BITS == 32
        /** Padding to keep the 64-bit members below naturally aligned. */
        uint32_t                    u32Alignment;
#endif

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t                    proc_ctls;

        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
        uint64_t                    proc_ctls2;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    pVAPICPhys;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  pMemObjVAPIC;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pVAPIC;

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;

        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Physical address of the MSR bitmap (1 page). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** R0 memory object for the MSR bitmap (1 page). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Virtual address of the MSR bitmap (1 page). */
        R0PTRTYPE(uint8_t *)        pMSRBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the guest MSR load area (1 page). */
        RTHCPHYS                    pGuestMSRPhys;
        /** R0 memory object for the guest MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjGuestMSR;
        /** Virtual address of the guest MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pGuestMSR;

        /** Physical address of the MSR load area (1 page). */
        RTHCPHYS                    pHostMSRPhys;
        /** R0 memory object for the MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjHostMSR;
        /** Virtual address of the MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pHostMSR;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /* Number of automatically loaded/restored MSRs. */
        uint32_t                    cCachedMSRs;
        /** Explicit structure padding (note: field name typo kept for ABI/source compatibility). */
        uint32_t                    uAlignement;

        /* Last used TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86EFLAGS               eflags;
            uint32_t                fValid;
        } RealMode;

        /** Details of the last VT-x error, for diagnostics. */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                ulVMCSRevision;
            uint32_t                ulInstrError;
            uint32_t                ulExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
    } vmx;

    /** AMD-V per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    pVMCBHostPhys;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    pVMCBPhys;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCB;

        /** Ring 0 handler for AMD-V (VMRUN). */
        PFNHWACCMSVMVMRUN           pfnVMRun;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pMSRBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    errCode;
        uint64_t                    intInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation state. */
    struct
    {
        /* Pending IO operation type. */
        HWACCMPENDINGIO             enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                unsigned            uPort;
                unsigned            uAndVal;
                unsigned            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HWACCMR0Enter and cleared in HWACCMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned                    cPages;
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack.
     * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
    union
    {
        /** The disassembler scratch space. */
        DISCPUSTATE                 DisState;
        /** Padding. */
        uint8_t                     abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
    };

    /** Explicit structure padding. */
    RTUINT                      padding2[1];

    /** Profiling of the world-switch phases. */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV              StatExit2Sub1;
    STAMPROFILEADV              StatExit2Sub2;
    STAMPROFILEADV              StatExit2Sub3;
#endif
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    /** Per-exit-reason event counters. */
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitInvpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitCLTS;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLMSW;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIrqWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitPreemptPending;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    /** TLB and page-flush event counters. */
    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTLB;
    STAMCOUNTER                 StatFlushTLBManual;
    STAMCOUNTER                 StatFlushPageInvlpg;
    STAMCOUNTER                 StatFlushTLBWorldSwitch;
    STAMCOUNTER                 StatNoFlushTLBWorldSwitch;
    STAMCOUNTER                 StatFlushTLBCRxChange;
    STAMCOUNTER                 StatFlushASID;
    STAMCOUNTER                 StatFlushTLBInvlpga;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchToR3;

    /** TSC offsetting/interception counters. */
    STAMCOUNTER                 StatTSCOffset;
    STAMCOUNTER                 StatTSCIntercept;
    STAMCOUNTER                 StatTSCInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNPF;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIOCheck;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /** Per-exit-reason and per-injected-IRQ counter arrays (R3/R0 mappings). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
} HWACCMCPU;
/** Pointer to HWACCM VMCPU instance data. */
typedef HWACCMCPU *PHWACCMCPU;
868
869
#ifdef IN_RING0

/** Gets the per-cpu info record (presumably for the CPU the caller runs on — see HWACCMR0). */
VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void);  /* (void): '()' would declare unspecified args in C. */
/** Gets the per-cpu info record for the given host CPU id. */
VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
# define HWACCMDumpRegs(a, b, c)            do { } while (0)
# define HWACCMR0DumpDescriptor(a, b, c)    do { } while (0)
#endif

/* Dummy callback handlers. */
VMMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
VMMR0DECL(int) HWACCMR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
VMMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
VMMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM, PVMCPU pVCpu);
VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
917
918/** @} */
919
920RT_C_DECLS_END
921
922#endif
923
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette