VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HWACCMInternal.h@ 42696

Last change on this file since 42696 was 42373, checked in by vboxsync, 12 years ago

VMM: invpg -> invlpg, some cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 32.4 KB
Line 
1/* $Id: HWACCMInternal.h 42373 2012-07-25 07:18:33Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HWACCMInternal_h
19#define ___HWACCMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hwaccm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64 bits guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

/** Use the cached VMCS read/write mechanism (see VMCSCACHE below). */
#define VMX_USE_CACHED_VMCS_ACCESSES
/** Emulate real mode under VT-x (uses the real-mode TSS; see vmx.pRealModeTSS). */
#define HWACCM_VMX_EMULATE_REALMODE


/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
 * handle this MSR manually. See @bugref{6208}. This is clearly visible while
 * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
51
52RT_C_DECLS_BEGIN
53
54
55/** @defgroup grp_hwaccm_int Internal
56 * @ingroup grp_hwaccm
57 * @internal
58 * @{
59 */
60
61
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask applied to exit reasons when indexing the statistics counters
 *  (MAX_EXITREASON_STAT - 1; presumably wraps out-of-range reasons — confirm in usage). */
#define MASK_EXITREASON_STAT       0xff
/** Mask applied to injected IRQ vectors for the injection statistics counters. */
#define MASK_INJECT_IRQ_STAT       0xff
66
/** @name Changed flags
 * These flags are used to keep track of which important registers that
 * have been changed since last they were reset.
 * @{
 */
#define HWACCM_CHANGED_GUEST_FPU                RT_BIT(0)
#define HWACCM_CHANGED_GUEST_CR0                RT_BIT(1)
#define HWACCM_CHANGED_GUEST_CR3                RT_BIT(2)
#define HWACCM_CHANGED_GUEST_CR4                RT_BIT(3)
#define HWACCM_CHANGED_GUEST_GDTR               RT_BIT(4)
#define HWACCM_CHANGED_GUEST_IDTR               RT_BIT(5)
#define HWACCM_CHANGED_GUEST_LDTR               RT_BIT(6)
#define HWACCM_CHANGED_GUEST_TR                 RT_BIT(7)
#define HWACCM_CHANGED_GUEST_MSR                RT_BIT(8)
#define HWACCM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
#define HWACCM_CHANGED_GUEST_DEBUG              RT_BIT(10)
#define HWACCM_CHANGED_HOST_CONTEXT             RT_BIT(11)

/** All guest register state (bits 0 thru 10). */
#define HWACCM_CHANGED_ALL_GUEST  (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                   | HWACCM_CHANGED_GUEST_CR0 \
                                   | HWACCM_CHANGED_GUEST_CR3 \
                                   | HWACCM_CHANGED_GUEST_CR4 \
                                   | HWACCM_CHANGED_GUEST_GDTR \
                                   | HWACCM_CHANGED_GUEST_IDTR \
                                   | HWACCM_CHANGED_GUEST_LDTR \
                                   | HWACCM_CHANGED_GUEST_TR \
                                   | HWACCM_CHANGED_GUEST_MSR \
                                   | HWACCM_CHANGED_GUEST_DEBUG \
                                   | HWACCM_CHANGED_GUEST_FPU)

/** Everything: all the guest register state plus the host context.
 * Defined in terms of HWACCM_CHANGED_ALL_GUEST so the two lists cannot
 * drift apart (previously this duplicated the whole OR-chain). */
#define HWACCM_CHANGED_ALL        (HWACCM_CHANGED_ALL_GUEST | HWACCM_CHANGED_HOST_CONTEXT)

/** @} */
111
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HWACCM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HWACCM_VTX_TOTAL_DEVHEAP_MEM        (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HWACCM_WITH_GUEST_PATCHING

/** @name HWACCM saved state (SSM) versions.
 * @{ */
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
# define HWACCM_SSM_VERSION                 5
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#else
# define HWACCM_SSM_VERSION                 4
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#endif
#define HWACCM_SSM_VERSION_2_0_X            3
/** @} */
135
/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object (per-cpu ring-0 allocation; exact contents are
     *  set up by the ring-0 init code — see HWACCMR0). */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentASID;
    /** TLB flush count. */
    uint32_t            cTLBFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushASIDBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
160
/**
 * Type of a pending guest I/O operation (see HWACCMCPU::PendingIO).
 */
typedef enum
{
    /** No pending operation / invalid. */
    HWACCMPENDINGIO_INVALID = 0,
    /** Pending I/O port read. */
    HWACCMPENDINGIO_PORT_READ,
    /** Pending I/O port write. */
    HWACCMPENDINGIO_PORT_WRITE,
    /** Pending string I/O port read. */
    HWACCMPENDINGIO_STRING_READ,
    /** Pending string I/O port write. */
    HWACCMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HWACCMPENDINGIO_32BIT_HACK = 0x7fffffff
} HWACCMPENDINGIO;
171
172
/**
 * Type of TPR access instruction recognized by the TPR patching code
 * (see HWACCMTPRPATCH).
 */
typedef enum
{
    /** Invalid/unset. */
    HWACCMTPRINSTR_INVALID,
    /** TPR read. */
    HWACCMTPRINSTR_READ,
    /** TPR read followed by a shift-right-by-4 (presumably TPR-to-CR8 style scaling
     *  — confirm against the patcher in HWACCMR3). */
    HWACCMTPRINSTR_READ_SHR4,
    /** TPR write from a register operand. */
    HWACCMTPRINSTR_WRITE_REG,
    /** TPR write of an immediate operand. */
    HWACCMTPRINSTR_WRITE_IMM,
    /** Instruction replaced by a jump (see HWACCMTPRPATCH::pJumpTarget). */
    HWACCMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HWACCMTPRINSTR_32BIT_HACK = 0x7fffffff
} HWACCMTPRINSTR;
184
/**
 * One TPR access patch record, kept in HWACCM::PatchTree.
 */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HWACCMTPRINSTR      enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HWACCMTPRPATCH;
/** Pointer to HWACCMTPRPATCH. */
typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
210
/**
 * Switcher function, HC to RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   uOffsetVMCPU    VMCPU offset from pVM.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK (int) FNHWACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
/** Pointer to switcher function. */
typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
221
/**
 * HWACCM VM Instance data.
 * Changes to this must checked against the padding of the hwaccm union in VM!
 */
typedef struct HWACCM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit structure padding (10 bools above + 6 = 16 bytes). */
    bool                        u8Alignment[6];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxASID;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    /** Explicit padding. */
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;

    /** AMD-V 64-bit vmrun handler (RC address). */
    RTRCPTR                     pfnSVMGCVMRun64;

    /** VT-x 64-bit vmlaunch handler (RC address). */
    RTRCPTR                     pfnVMXGCStartVM64;

    /** RC handler to setup the 64 bits FPU state. */
    RTRCPTR                     pfnSaveGuestFPU64;

    /** RC handler to setup the 64 bits debug state. */
    RTRCPTR                     pfnSaveGuestDebug64;

    /** Test handler (debugging aid). */
    RTRCPTR                     pfnTest64;

    /** Explicit padding. */
    RTRCPTR                     uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t                    u32Alignment[1]; */
#endif

    /** VT-x specific data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVPID;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVPID;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Explicit padding. */
        bool                        uAlignment[1];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC physical page (serves for filtering accesses). */
        RTR0MEMOBJ                  pMemObjAPIC;
        /** Physical address of the APIC physical page (serves for filtering accesses). */
        RTHCPHYS                    pAPICPhys;
        /** Virtual address of the APIC physical page (serves for filtering accesses). */
        R0PTRTYPE(uint8_t *)        pAPIC;

        /** R0 memory object for the MSR entry load page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSREntryLoad;
        /** Physical address of the MSR entry load page (guest MSRs). */
        RTHCPHYS                    pMSREntryLoadPhys;
        /** Virtual address of the MSR entry load page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSREntryLoad;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /** Scratch page used by the crash-dump magic (see VMCSCACHE magic fields). */
        RTR0MEMOBJ                  pMemObjScratch;
        RTHCPHYS                    pScratchPhys;
        R0PTRTYPE(uint8_t *)        pScratch;
#endif
        /** R0 memory object for the MSR exit store page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitStore;
        /** Physical address of the MSR exit store page (guest MSRs). */
        RTHCPHYS                    pMSRExitStorePhys;
        /** Virtual address of the MSR exit store page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitStore;

        /** R0 memory object for the MSR exit load page (host MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitLoad;
        /** Physical address of the MSR exit load page (host MSRs). */
        RTHCPHYS                    pMSRExitLoadPhys;
        /** Virtual address of the MSR exit load page (host MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitLoad;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /** Explicit padding. */
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    hostEFER;

        /** VMX MSR values */
        struct
        {
            uint64_t                    feature_ctrl;
            uint64_t                    vmx_basic_info;
            VMX_CAPABILITY              vmx_pin_ctls;
            VMX_CAPABILITY              vmx_proc_ctls;
            VMX_CAPABILITY              vmx_proc_ctls2;
            VMX_CAPABILITY              vmx_exit;
            VMX_CAPABILITY              vmx_entry;
            uint64_t                    vmx_misc;
            uint64_t                    vmx_cr0_fixed0;
            uint64_t                    vmx_cr0_fixed1;
            uint64_t                    vmx_cr4_fixed0;
            uint64_t                    vmx_cr4_fixed1;
            uint64_t                    vmx_vmcs_enum;
            uint64_t                    vmx_eptcaps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEPT;
        VMX_FLUSH_VPID              enmFlushVPID;
    } vmx;

    /** AMD-V specific data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  pMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    pIOBitmapPhys;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pIOBitmap;

        /** HWCR msr (for diagnostics) */
        uint64_t                    msrHWCR;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                 PatchTree;
    /** Number of entries in aPatches currently in use. */
    uint32_t                    cPatches;
    /** Storage for the TPR patch records. */
    HWACCMTPRPATCH              aPatches[64];

    /** Cached AMD CPUID feature bits. */
    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** HWACCMR0Init was run */
    bool                        fHWACCMR0Init;
    /** Explicit padding. */
    bool                        u8Alignment1[7];

    /* TPR patching statistics. */
    STAMCOUNTER                 StatTPRPatchSuccess;
    STAMCOUNTER                 StatTPRPatchFailure;
    STAMCOUNTER                 StatTPRReplaceSuccess;
    STAMCOUNTER                 StatTPRReplaceFailure;
} HWACCM;
/** Pointer to HWACCM VM instance data. */
typedef HWACCM *PHWACCM;
457
/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY     128

/** Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t             aMagic[16];
    uint64_t            uMagic;
    uint64_t            u64TimeEntry;
    uint64_t            u64TimeSwitch;
    uint64_t            cResume;
    uint64_t            interPD;
    uint64_t            pSwitcher;
    uint32_t            uPos;
    uint32_t            idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t            cr2;
    /** Queued VMCS field writes (applied on VM entry). */
    struct
    {
        uint32_t            cValidEntries;
        uint32_t            uAlignment;
        uint32_t            aField[VMCSCACHE_MAX_ENTRY];
        uint64_t            aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cached VMCS field reads (filled on VM exit). */
    struct
    {
        uint32_t            cValidEntries;
        uint32_t            uAlignment;
        uint32_t            aField[VMCSCACHE_MAX_ENTRY];
        uint64_t            aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    /* Debug-build sanity records for the world switch (inputs/outputs/scratch). */
    struct
    {
        RTHCPHYS            HCPhysCpuPage;
        RTHCPHYS            HCPhysVMCS;
        RTGCPTR             pCache;
        RTGCPTR             pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS            HCPhysVMCS;
        RTGCPTR             pCache;
        RTGCPTR             pCtx;
        uint64_t            eflags;
        uint64_t            cr8;
    } TestOut;
    struct
    {
        uint64_t            param1;
        uint64_t            param2;
        uint64_t            param3;
        uint64_t            param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
519
/** VMX StartVM function.
 * @param   fResume     Whether to resume (VMRESUME) or launch (VMLAUNCH) — inferred
 *                      from the name; confirm against the assembly implementation.
 * @param   pCtx        Guest CPU context.
 * @param   pCache      VMCS cache (see VMCSCACHE).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU. */
typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;

/** SVM VMRun function.
 * @param   pVMCBHostPhys   Physical address of the host VMCB.
 * @param   pVMCBPhys       Physical address of the guest VMCB.
 * @param   pCtx            Guest CPU context.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the VMCPU. */
typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
529
/**
 * HWACCM VMCPU Instance data.
 */
typedef struct HWACCMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization) */
    bool                        fFPUOldStyleOverride;

    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;

    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;

    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;

    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Explicit padding. */
    uint8_t                     bAlignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;

    /** HWACCM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;

    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    RTCPUID                     idLastCpu;

    /** TLB flush count */
    uint32_t                    cTLBFlushes;

    /** Current ASID in use by the VM */
    uint32_t                    uCurrentASID;

    /** Explicit padding. */
    uint32_t                    u32Alignment;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTSCAux;

    /** VT-x specific per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVMCS;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVMCS;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVMCS;

        /** Ring 0 handlers for VT-x. */
        PFNHWACCMVMXSTARTVM         pfnStartVM;

#if HC_ARCH_BITS == 32
        /** Explicit padding. */
        uint32_t                    u32Alignment;
#endif

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t                    proc_ctls;

        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
        uint64_t                    proc_ctls2;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVAPIC;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVAPIC;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVAPIC;

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;

        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Physical address of the MSR bitmap (1 page). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** R0 memory object for the MSR bitmap (1 page). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Virtual address of the MSR bitmap (1 page). */
        R0PTRTYPE(uint8_t *)        pMSRBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the guest MSR load area (1 page). */
        RTHCPHYS                    pGuestMSRPhys;
        /** R0 memory object for the guest MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjGuestMSR;
        /** Virtual address of the guest MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pGuestMSR;

        /** Physical address of the MSR load area (1 page). */
        RTHCPHYS                    pHostMSRPhys;
        /** R0 memory object for the MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjHostMSR;
        /** Virtual address of the MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pHostMSR;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cCachedMSRs;
        /** Explicit padding. */
        uint32_t                    uAlignement;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86EFLAGS                   eflags;
            uint32_t                    fValid;
        } RealMode;

        /** Diagnostic state captured on the last VMX failure. */
        struct
        {
            uint64_t                    u64VMCSPhys;
            uint32_t                    ulVMCSRevision;
            uint32_t                    ulInstrError;
            uint32_t                    ulExitReason;
            RTCPUID                     idEnteredCpu;
            RTCPUID                     idCurrentCpu;
            uint32_t                    padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
    } vmx;

    /** AMD-V specific per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    pVMCBHostPhys;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    pVMCBPhys;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCB;

        /** Ring-0 handler for AMD-V guest execution.
         *  (NOTE(review): the original comment said "VT-x" here — copy/paste slip;
         *  the member type is the SVM VMRun callback.) */
        PFNHWACCMSVMVMRUN           pfnVMRun;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pMSRBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    errCode;
        uint64_t                    intInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        /** Explicit padding. */
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation state (see HWACCMPENDINGIO). */
    struct
    {
        /** Pending IO operation type. */
        HWACCMPENDINGIO             enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                unsigned                uPort;
                unsigned                uAndVal;
                unsigned                cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HWACCMR0Enter and cleared in HWACCMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned                    cPages;
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE                 DisState;

    /** Explicit padding. */
    uint32_t                    padding2[1];

    /* Statistics. */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV              StatExit2Sub1;
    STAMPROFILEADV              StatExit2Sub2;
    STAMPROFILEADV              StatExit2Sub3;
#endif
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitCLTS;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLMSW;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIrqWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitPreemptPending;
    STAMCOUNTER                 StatExitMTF;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTLB;
    STAMCOUNTER                 StatFlushTLBManual;
    STAMCOUNTER                 StatFlushPageInvlpg;
    STAMCOUNTER                 StatFlushTLBWorldSwitch;
    STAMCOUNTER                 StatNoFlushTLBWorldSwitch;
    STAMCOUNTER                 StatFlushTLBCRxChange;
    STAMCOUNTER                 StatFlushASID;
    STAMCOUNTER                 StatFlushTLBInvlpga;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchToR3;

    STAMCOUNTER                 StatTSCOffset;
    STAMCOUNTER                 StatTSCIntercept;
    STAMCOUNTER                 StatTSCInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNPF;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIOCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /** Per-exit-reason counter arrays (ring-3 and ring-0 mappings). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    /** Per-vector injected-IRQ counter arrays (ring-3 and ring-0 mappings). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
} HWACCMCPU;
/** Pointer to HWACCM VM instance data. */
typedef HWACCMCPU *PHWACCMCPU;
864
865
#ifdef IN_RING0

/** Gets the per-cpu global info for the CPU the caller is running on. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
/** Gets the per-cpu global info for the given CPU id. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
/* Strict-only debug dumpers compile away to nothing in non-strict builds. */
#  define HWACCMDumpRegs(a, b, c)           do { } while (0)
#  define HWACCMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
901
902/** @} */
903
904RT_C_DECLS_END
905
906#endif
907
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette