VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@93554

Last change on this file: r93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 55.5 KB
1/* $Id: HMInternal.h 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_HMInternal_h
19#define VMM_INCLUDED_SRC_include_HMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/dis.h>
28#include <VBox/vmm/hm.h>
29#include <VBox/vmm/hm_vmx.h>
30#include <VBox/vmm/hm_svm.h>
31#include <VBox/vmm/pgm.h>
32#include <VBox/vmm/cpum.h>
33#include <VBox/vmm/trpm.h>
34#include <iprt/memobj.h>
35#include <iprt/cpuset.h>
36#include <iprt/mp.h>
37#include <iprt/avl.h>
38#include <iprt/string.h>
39
40#include "HMVMXCommon.h"
41
42#if HC_ARCH_BITS == 32
43# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
44#endif
45
46/** @def HM_PROFILE_EXIT_DISPATCH
47 * Enables profiling of the VM exit handler dispatching. */
48#if 0 || defined(DOXYGEN_RUNNING)
49# define HM_PROFILE_EXIT_DISPATCH
50#endif
51
52RT_C_DECLS_BEGIN
53
54
55/** @defgroup grp_hm_int Internal
56 * @ingroup grp_hm
57 * @internal
58 * @{
59 */
60
61/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
62#define HM_EPT_IDENTITY_PG_TABLE_SIZE HOST_PAGE_SIZE
63/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
64#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * X86_PAGE_SIZE + 1)
65/** Total guest mapped memory needed. */
66#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
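
/*
 * Worked sizes for the defines above, assuming the common case of a 4 KiB host
 * page and X86_PAGE_SIZE == 4096 (illustrative only, not part of the header):
 *
 * @code
 *    HM_EPT_IDENTITY_PG_TABLE_SIZE == 4096                             // one page directory: 1024 PDEs x 4 bytes, each PDE mapping a 4 MB page => 4 GB covered
 *    HM_VTX_TSS_SIZE               == sizeof(VBOXTSS) + 2 * 4096 + 1   // TSS + 8 KiB I/O permission bitmap + the trailing 0xff byte the bitmap format requires
 *    HM_VTX_TOTAL_DEVHEAP_MEM      == HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE
 * @endcode
 */
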
67
68
69/** @name Macros for enabling and disabling preemption.
70 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
71 * preemption has already been disabled when there is no context hook.
72 * @{ */
73#ifdef VBOX_STRICT
74# define HM_DISABLE_PREEMPT(a_pVCpu) \
75 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
76 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled((a_pVCpu))); \
77 RTThreadPreemptDisable(&PreemptStateInternal)
78#else
79# define HM_DISABLE_PREEMPT(a_pVCpu) \
80 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
81 RTThreadPreemptDisable(&PreemptStateInternal)
82#endif /* VBOX_STRICT */
83#define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)
84/** @} */
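
/*
 * For illustration only: a minimal sketch of how these macros are meant to
 * bracket a critical section (hmSomeR0Helper is a placeholder name, not code
 * from this header).  HM_DISABLE_PREEMPT declares the local PreemptStateInternal
 * variable that HM_RESTORE_PREEMPT later hands to RTThreadPreemptRestore, so
 * both must be used within the same scope.
 *
 * @code
 *    static void hmSomeR0Helper(PVMCPUCC pVCpu)
 *    {
 *        HM_DISABLE_PREEMPT(pVCpu);      // declares PreemptStateInternal and disables preemption
 *        // ... touch per-CPU state that must not migrate to another host CPU ...
 *        HM_RESTORE_PREEMPT();           // re-enables preemption using PreemptStateInternal
 *    }
 * @endcode
 */
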
85
86
87/** @name HM saved state versions.
88 * @{
89 */
90#define HM_SAVED_STATE_VERSION HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
91#define HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT 6
92#define HM_SAVED_STATE_VERSION_TPR_PATCHING 5
93#define HM_SAVED_STATE_VERSION_NO_TPR_PATCHING 4
94#define HM_SAVED_STATE_VERSION_2_0_X 3
95/** @} */
96
97
98/**
99 * HM physical (host) CPU information.
100 */
101typedef struct HMPHYSCPU
102{
103 /** The CPU ID. */
104 RTCPUID idCpu;
105 /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
106 RTR0MEMOBJ hMemObj;
107 /** The physical address of the first page in hMemObj (it's a
108 * physically contiguous allocation if it spans multiple pages). */
109 RTHCPHYS HCPhysMemObj;
110 /** The address of the memory (for pfnEnable). */
111 void *pvMemObj;
112 /** Current ASID (AMD-V) / VPID (Intel). */
113 uint32_t uCurrentAsid;
114 /** TLB flush count. */
115 uint32_t cTlbFlushes;
116 /** Whether to flush each new ASID/VPID before use. */
117 bool fFlushAsidBeforeUse;
118 /** Configured for VT-x or AMD-V. */
119 bool fConfigured;
120 /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
121 bool fIgnoreAMDVInUseError;
122 /** Whether CR4.VMXE was already enabled prior to us enabling it. */
123 bool fVmxeAlreadyEnabled;
124 /** In use by our code. (for power suspend) */
125 bool volatile fInUse;
126#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
127 /** Nested-guest union (put data common to SVM/VMX outside the union). */
128 union
129 {
130 /** Nested-guest SVM data. */
131 struct
132 {
133 /** The active nested-guest MSR permission bitmap memory backing. */
134 RTR0MEMOBJ hNstGstMsrpm;
135 /** The physical address of the first page in hNstGstMsrpm (physically
136 * contiguous allocation). */
137 RTHCPHYS HCPhysNstGstMsrpm;
138 /** The address of the active nested-guest MSRPM. */
139 void *pvNstGstMsrpm;
140 } svm;
141 /** @todo Nested-VMX. */
142 } n;
143#endif
144} HMPHYSCPU;
145/** Pointer to HMPHYSCPU struct. */
146typedef HMPHYSCPU *PHMPHYSCPU;
147/** Pointer to a const HMPHYSCPU struct. */
148typedef const HMPHYSCPU *PCHMPHYSCPU;
149
150/**
151 * TPR-instruction type.
152 */
153typedef enum
154{
155 HMTPRINSTR_INVALID,
156 HMTPRINSTR_READ,
157 HMTPRINSTR_READ_SHR4,
158 HMTPRINSTR_WRITE_REG,
159 HMTPRINSTR_WRITE_IMM,
160 HMTPRINSTR_JUMP_REPLACEMENT,
161 /** The usual 32-bit paranoia. */
162 HMTPRINSTR_32BIT_HACK = 0x7fffffff
163} HMTPRINSTR;
164
165/**
166 * TPR patch information.
167 */
168typedef struct
169{
170 /** The key is the address of patched instruction. (32 bits GC ptr) */
171 AVLOU32NODECORE Core;
172 /** Original opcode. */
173 uint8_t aOpcode[16];
174 /** Instruction size. */
175 uint32_t cbOp;
176 /** Replacement opcode. */
177 uint8_t aNewOpcode[16];
178 /** Replacement instruction size. */
179 uint32_t cbNewOp;
180 /** Instruction type. */
181 HMTPRINSTR enmType;
182 /** Source operand. */
183 uint32_t uSrcOperand;
184 /** Destination operand. */
185 uint32_t uDstOperand;
186 /** Number of times the instruction caused a fault. */
187 uint32_t cFaults;
188 /** Patch address of the jump replacement. */
189 RTGCPTR32 pJumpTarget;
190} HMTPRPATCH;
191/** Pointer to HMTPRPATCH. */
192typedef HMTPRPATCH *PHMTPRPATCH;
193/** Pointer to a const HMTPRPATCH. */
194typedef const HMTPRPATCH *PCHMTPRPATCH;
195
196
197/**
198 * Makes a HMEXITSTAT::uKey value from a program counter and an exit code.
199 *
200 * @returns 64-bit key
201 * @param a_uPC The RIP + CS.BASE value of the exit.
202 * @param a_uExit The exit code.
203 * @todo Add CPL?
204 */
205#define HMEXITSTAT_MAKE_KEY(a_uPC, a_uExit) (((a_uPC) & UINT64_C(0x0000ffffffffffff)) | (uint64_t)(a_uExit) << 48)
206
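
/*
 * Worked example for HMEXITSTAT_MAKE_KEY (values picked for illustration): the
 * low 48 bits hold the flat PC and the top 16 bits hold the exit code.
 *
 * @code
 *    uint64_t const uKey = HMEXITSTAT_MAKE_KEY(UINT64_C(0xfffff80000123456), 0x1f);
 *    // uKey == UINT64_C(0x001ff80000123456)
 *    //   bits 63:48 = 0x001f           (the exit code)
 *    //   bits 47:0  = 0xf80000123456   (the PC with its top 16 bits masked off)
 * @endcode
 */
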
207typedef struct HMEXITINFO
208{
209 /** See HMEXITSTAT_MAKE_KEY(). */
210 uint64_t uKey;
211 /** Number of recent hits (depreciates with time). */
212 uint32_t volatile cHits;
213 /** The age + lock. */
214 uint16_t volatile uAge;
215 /** Action or action table index. */
216 uint16_t iAction;
217} HMEXITINFO;
218AssertCompileSize(HMEXITINFO, 16); /* Lots of these guys, so don't add any unnecessary stuff! */
219
220typedef struct HMEXITHISTORY
221{
222 /** The exit timestamp. */
223 uint64_t uTscExit;
224 /** The index of the corresponding HMEXITINFO entry.
225 * UINT32_MAX if none (too many collisions, race, whatever). */
226 uint32_t iExitInfo;
227 /** Figure out later, needed for padding now. */
228 uint32_t uSomeClueOrSomething;
229} HMEXITHISTORY;
230
231/**
232 * Switcher function, HC to the special 64-bit RC.
233 *
234 * @param pVM The cross context VM structure.
235 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
236 * @returns Return code indicating the action to take.
237 */
238typedef DECLCALLBACKTYPE(int, FNHMSWITCHERHC,(PVM pVM, uint32_t offCpumVCpu));
239/** Pointer to switcher function. */
240typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
241
242
243/**
244 * HM VM Instance data.
245 * Changes to this must be checked against the padding of the hm union in VM!
246 */
247typedef struct HM
248{
249 /** Set when the debug facility has breakpoints/events enabled that require
250 * us to use the debug execution loop in ring-0. */
251 bool fUseDebugLoop;
252 /** Set when TPR patching is allowed. */
253 bool fTprPatchingAllowed;
254 /** Set when TPR patching is active. */
255 bool fTprPatchingActive;
256 /** Alignment padding. */
257 bool afAlignment1[5];
258
259 struct
260 {
261 /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */
262 bool fSupported;
263 /** Set when we've enabled VMX. */
264 bool fEnabled;
265 /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
266 uint8_t cPreemptTimerShift;
267 bool fAlignment1;
268
269 /** @name Configuration (gets copied if problematic)
270 * @{ */
271 /** Set if Last Branch Record (LBR) is enabled. */
272 bool fLbrCfg;
273 /** Set if VT-x VPID is allowed. */
274 bool fAllowVpid;
275 /** Set if unrestricted guest execution is in use (real and protected mode
276 * without paging). */
277 bool fUnrestrictedGuestCfg;
278 /** Set if the preemption timer should be used if available. Ring-0
279 * quietly clears this if the hardware doesn't support the preemption timer. */
280 bool fUsePreemptTimerCfg;
281 /** @} */
282
283 /** Pause-loop exiting (PLE) gap in ticks. */
284 uint32_t cPleGapTicks;
285 /** Pause-loop exiting (PLE) window in ticks. */
286 uint32_t cPleWindowTicks;
287
288 /** Virtual address of the TSS page used for real mode emulation. */
289 R3PTRTYPE(PVBOXTSS) pRealModeTSS;
290 /** Virtual address of the identity page table used for real mode and protected
291 * mode without paging emulation in EPT mode. */
292 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
293 } vmx;
294
295 struct
296 {
297 /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */
298 bool fSupported;
299 /** Set when we've enabled SVM. */
300 bool fEnabled;
301 /** Set when the hack to ignore VERR_SVM_IN_USE is active.
302 * @todo Safe? */
303 bool fIgnoreInUseError;
304 /** Whether to use virtualized VMSAVE/VMLOAD feature. */
305 bool fVirtVmsaveVmload;
306 /** Whether to use virtual GIF feature. */
307 bool fVGif;
308 /** Whether to use LBR virtualization feature. */
309 bool fLbrVirt;
310 bool afAlignment1[2];
311
312 /** Pause filter counter. */
313 uint16_t cPauseFilter;
314 /** Pause filter threshold in ticks. */
315 uint16_t cPauseFilterThresholdTicks;
316 uint32_t u32Alignment2;
317 } svm;
318
319 /** AVL tree with all patches (active or disabled) sorted by guest instruction address.
320 * @todo For @bugref{9217} this AVL tree must be eliminated and instead
321 * sort aPatches by address and do a safe binary search on it. */
322 AVLOU32TREE PatchTree;
323 uint32_t cPatches;
324 HMTPRPATCH aPatches[64];
325
326 /** Guest allocated memory for patching purposes. */
327 RTGCPTR pGuestPatchMem;
328 /** Current free pointer inside the patch block. */
329 RTGCPTR pFreeGuestPatchMem;
330 /** Size of the guest patch memory block. */
331 uint32_t cbGuestPatchMem;
332 uint32_t u32Alignment2;
333
334 /** For ring-3 use only. */
335 struct
336 {
337 /** Last recorded error code during HM ring-0 init. */
338 int32_t rcInit;
339 uint32_t u32Alignment3;
340
341 /** Maximum ASID allowed.
342 * This is mainly for the release log. */
343 uint32_t uMaxAsid;
344 /** World switcher flags (HM_WSF_XXX) for the release log. */
345 uint32_t fWorldSwitcher;
346
347 struct
348 {
349 /** Set if VPID is supported (ring-3 copy). */
350 bool fVpid;
351 /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
352 * init, for logging). */
353 bool fSupportsVmcsEfer;
354 /** Whether to use VMCS shadowing. */
355 bool fUseVmcsShadowing;
356 bool fAlignment2;
357
358 /** Host CR4 value (set by ring-0 VMX init, for logging). */
359 uint64_t u64HostCr4;
360 /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
361 uint64_t u64HostSmmMonitorCtl;
362 /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
363 uint64_t u64HostMsrEfer;
364 /** Host IA32_FEATURE_CONTROL MSR (set by ring-0 VMX init, for logging). */
365 uint64_t u64HostFeatCtrl;
366
367 /** The first valid host LBR branch-from-IP stack range. */
368 uint32_t idLbrFromIpMsrFirst;
369 /** The last valid host LBR branch-from-IP stack range. */
370 uint32_t idLbrFromIpMsrLast;
371
372 /** The first valid host LBR branch-to-IP stack range. */
373 uint32_t idLbrToIpMsrFirst;
374 /** The last valid host LBR branch-to-IP stack range. */
375 uint32_t idLbrToIpMsrLast;
376
377 /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
378 RTHCPHYS HCPhysVmxEnableError;
379 /** VMX MSR values (only for ring-3 consumption). */
380 VMXMSRS Msrs;
381
382 /** Tagged-TLB flush type (only for ring-3 consumption). */
383 VMXTLBFLUSHTYPE enmTlbFlushType;
384 /** Flush type to use for INVEPT (only for ring-3 consumption). */
385 VMXTLBFLUSHEPT enmTlbFlushEpt;
386 /** Flush type to use for INVVPID (only for ring-3 consumption). */
387 VMXTLBFLUSHVPID enmTlbFlushVpid;
388 } vmx;
389
390 struct
391 {
392 /** SVM revision. */
393 uint32_t u32Rev;
394 /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
395 uint32_t fFeatures;
396 /** HWCR MSR (for diagnostics). */
397 uint64_t u64MsrHwcr;
398 } svm;
399 } ForR3;
400
401 /** @name Configuration not used (much) after VM setup
402 * @{ */
403 /** The maximum number of resume loops allowed in ring-0 (safety precaution).
404 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
405 uint32_t cMaxResumeLoopsCfg;
406 /** Set if nested paging is enabled.
407 * Config value that is copied to HMR0PERVM::fNestedPaging on setup. */
408 bool fNestedPagingCfg;
409 /** Set if large pages are enabled (requires nested paging).
410 * Config only, passed on to PGM where it really belongs.
411 * @todo move to PGM */
412 bool fLargePages;
413 /** Set if we can support 64-bit guests or not.
414 * Config value that is copied to HMR0PERVM::fAllow64BitGuests on setup. */
415 bool fAllow64BitGuestsCfg;
416 /** Set when we initialize VT-x or AMD-V once for all CPUs. */
417 bool fGlobalInit;
418 /** Set if hardware APIC virtualization is enabled.
419 * @todo Not really used by HM, move to APIC where it's actually used. */
420 bool fVirtApicRegs;
421 /** Set if posted interrupt processing is enabled.
422 * @todo Not really used by HM, move to APIC where it's actually used. */
423 bool fPostedIntrs;
424 /** VM needs workaround for missing TLB flush in OS/2, see ticketref:20625.
425 * @note Currently only heeded by AMD-V. */
426 bool fMissingOS2TlbFlushWorkaround;
427 /** @} */
428
429 /** @name Processed into HMR0PERVCPU::fWorldSwitcher by ring-0 on VM init.
430 * @{ */
431 /** Set if indirect branch prediction barrier on VM exit. */
432 bool fIbpbOnVmExit;
433 /** Set if indirect branch prediction barrier on VM entry. */
434 bool fIbpbOnVmEntry;
435 /** Set if level 1 data cache should be flushed on VM entry. */
436 bool fL1dFlushOnVmEntry;
437 /** Set if level 1 data cache should be flushed on EMT scheduling. */
438 bool fL1dFlushOnSched;
439 /** Set if MDS related buffers should be cleared on VM entry. */
440 bool fMdsClearOnVmEntry;
441 /** Set if MDS related buffers should be cleared on EMT scheduling. */
442 bool fMdsClearOnSched;
443 /** Set if host manages speculation control settings.
444 * @todo doesn't do anything ... */
445 bool fSpecCtrlByHost;
446 /** @} */
447
448 /** Set when we've finalized the VMX / SVM initialization in ring-3
449 * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */
450 bool fInitialized;
451
452 bool afAlignment2[5];
453
454 STAMCOUNTER StatTprPatchSuccess;
455 STAMCOUNTER StatTprPatchFailure;
456 STAMCOUNTER StatTprReplaceSuccessCr8;
457 STAMCOUNTER StatTprReplaceSuccessVmc;
458 STAMCOUNTER StatTprReplaceFailure;
459} HM;
460/** Pointer to HM VM instance data. */
461typedef HM *PHM;
462AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
463AssertCompileMemberAlignment(HM, vmx, 8);
464AssertCompileMemberAlignment(HM, svm, 8);
465AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
466AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
467
468
469/**
470 * Per-VM ring-0 instance data for HM.
471 */
472typedef struct HMR0PERVM
473{
474 /** The maximum number of resume loops allowed in ring-0 (safety precaution).
475 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
476 uint32_t cMaxResumeLoops;
477
478 /** Set if nested paging is enabled. */
479 bool fNestedPaging;
480 /** Set if we can support 64-bit guests or not. */
481 bool fAllow64BitGuests;
482 bool afAlignment1[1];
483
484 /** AMD-V specific data. */
485 struct HMR0SVMVM
486 {
487 /** Set if erratum 170 affects the AMD cpu. */
488 bool fAlwaysFlushTLB;
489 } svm;
490
491 /** VT-x specific data. */
492 struct HMR0VMXVM
493 {
494 /** Set if unrestricted guest execution is in use (real and protected mode
495 * without paging). */
496 bool fUnrestrictedGuest;
497 /** Set if the preemption timer is in use. */
498 bool fUsePreemptTimer;
499 /** Whether to use VMCS shadowing. */
500 bool fUseVmcsShadowing;
501 /** Set if Last Branch Record (LBR) is enabled. */
502 bool fLbr;
503 bool afAlignment2[3];
504
505 /** Set if VPID is supported (ring-3 copy in HM::ForR3.vmx.fVpid). */
506 bool fVpid;
507 /** Tagged-TLB flush type. */
508 VMXTLBFLUSHTYPE enmTlbFlushType;
509 /** Flush type to use for INVEPT. */
510 VMXTLBFLUSHEPT enmTlbFlushEpt;
511 /** Flush type to use for INVVPID. */
512 VMXTLBFLUSHVPID enmTlbFlushVpid;
513
514 /** The host LBR TOS (top-of-stack) MSR id. */
515 uint32_t idLbrTosMsr;
516
517 /** The first valid host LBR branch-from-IP stack range. */
518 uint32_t idLbrFromIpMsrFirst;
519 /** The last valid host LBR branch-from-IP stack range. */
520 uint32_t idLbrFromIpMsrLast;
521
522 /** The first valid host LBR branch-to-IP stack range. */
523 uint32_t idLbrToIpMsrFirst;
524 /** The last valid host LBR branch-to-IP stack range. */
525 uint32_t idLbrToIpMsrLast;
526
527 /** Pointer to the VMREAD bitmap. */
528 R0PTRTYPE(void *) pvVmreadBitmap;
529 /** Pointer to the VMWRITE bitmap. */
530 R0PTRTYPE(void *) pvVmwriteBitmap;
531
532 /** Pointer to the shadow VMCS read-only fields array. */
533 R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
534 /** Pointer to the shadow VMCS read/write fields array. */
535 R0PTRTYPE(uint32_t *) paShadowVmcsFields;
536 /** Number of elements in the shadow VMCS read-only fields array. */
537 uint32_t cShadowVmcsRoFields;
538 /** Number of elements in the shadow VMCS read-write fields array. */
539 uint32_t cShadowVmcsFields;
540
541 /** Host-physical address of the APIC-access page. */
542 RTHCPHYS HCPhysApicAccess;
543 /** Host-physical address of the VMREAD bitmap. */
544 RTHCPHYS HCPhysVmreadBitmap;
545 /** Host-physical address of the VMWRITE bitmap. */
546 RTHCPHYS HCPhysVmwriteBitmap;
547
548#ifdef VBOX_WITH_CRASHDUMP_MAGIC
549 /** Host-physical address of the crash-dump scratch area. */
550 RTHCPHYS HCPhysScratch;
551 /** Pointer to the crash-dump scratch bitmap. */
552 R0PTRTYPE(uint8_t *) pbScratch;
553#endif
554
555 /** Ring-0 memory object for per-VM VMX structures. */
556 RTR0MEMOBJ hMemObj;
557 /** Virtual address of the APIC-access page (not used). */
558 R0PTRTYPE(uint8_t *) pbApicAccess;
559 } vmx;
560} HMR0PERVM;
561/** Pointer to HM's per-VM ring-0 instance data. */
562typedef HMR0PERVM *PHMR0PERVM;
563
564
565/** @addtogroup grp_hm_int_svm SVM Internal
566 * @{ */
567/** SVM VMRun function, see SVMR0VMRun(). */
568typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
569/** Pointer to a SVM VMRun function. */
570typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
571
572/**
573 * SVM nested-guest VMCB cache.
574 *
575 * Contains VMCB fields from the nested-guest VMCB before they're modified by
576 * SVM R0 code for hardware-assisted SVM execution of a nested-guest.
577 *
578 * A VMCB field needs to be cached when it needs to be modified for execution using
579 * hardware-assisted SVM and any of the following are true:
580 * - If the original field needs to be inspected during execution of the
581 * nested-guest or \#VMEXIT processing.
582 * - If the field is written back to memory on \#VMEXIT by the physical CPU.
583 *
584 * A VMCB field needs to be restored only when the field is written back to
585 * memory on \#VMEXIT by the physical CPU and thus would be visible to the
586 * guest.
587 *
588 * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
589 * this structure.
590 */
591typedef struct SVMNESTEDVMCBCACHE
592{
593 /** Cache of CRX read intercepts. */
594 uint16_t u16InterceptRdCRx;
595 /** Cache of CRX write intercepts. */
596 uint16_t u16InterceptWrCRx;
597 /** Cache of DRX read intercepts. */
598 uint16_t u16InterceptRdDRx;
599 /** Cache of DRX write intercepts. */
600 uint16_t u16InterceptWrDRx;
601
602 /** Cache of the pause-filter threshold. */
603 uint16_t u16PauseFilterThreshold;
604 /** Cache of the pause-filter count. */
605 uint16_t u16PauseFilterCount;
606
607 /** Cache of exception intercepts. */
608 uint32_t u32InterceptXcpt;
609 /** Cache of control intercepts. */
610 uint64_t u64InterceptCtrl;
611
612 /** Cache of the TSC offset. */
613 uint64_t u64TSCOffset;
614
615 /** Cache of V_INTR_MASKING bit. */
616 bool fVIntrMasking;
617 /** Cache of the nested-paging bit. */
618 bool fNestedPaging;
619 /** Cache of the LBR virtualization bit. */
620 bool fLbrVirt;
621 /** Whether the VMCB is cached by HM. */
622 bool fCacheValid;
623 /** Alignment. */
624 bool afPadding0[4];
625} SVMNESTEDVMCBCACHE;
626/** Pointer to the SVMNESTEDVMCBCACHE structure. */
627typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
628/** Pointer to a const SVMNESTEDVMCBCACHE structure. */
629typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
630AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
631
632/** @} */
633
634/** @name Host-state restoration flags.
635 * @note If you change these values don't forget to update the assembly
636 * defines as well!
637 * @{
638 */
639#define VMX_RESTORE_HOST_SEL_DS RT_BIT(0)
640#define VMX_RESTORE_HOST_SEL_ES RT_BIT(1)
641#define VMX_RESTORE_HOST_SEL_FS RT_BIT(2)
642#define VMX_RESTORE_HOST_SEL_GS RT_BIT(3)
643#define VMX_RESTORE_HOST_SEL_TR RT_BIT(4)
644#define VMX_RESTORE_HOST_GDTR RT_BIT(5)
645#define VMX_RESTORE_HOST_IDTR RT_BIT(6)
646#define VMX_RESTORE_HOST_GDT_READ_ONLY RT_BIT(7)
647#define VMX_RESTORE_HOST_GDT_NEED_WRITABLE RT_BIT(8)
648#define VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE RT_BIT(9)
649/**
650 * This _must_ be the top most bit, so that we can easily check that it and
651 * something else is set w/o having to do two checks like this:
652 * @code
653 * if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
654 * && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
655 * @endcode
656 * Instead we can then do:
657 * @code
658 * if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
659 * @endcode
660 */
661#define VMX_RESTORE_HOST_REQUIRED RT_BIT(10)
662/** @} */
663
664/**
665 * Host-state restoration structure.
666 *
667 * This holds host-state fields that require manual restoration.
668 * Assembly version found in HMInternal.mac (should be automatically verified).
669 */
670typedef struct VMXRESTOREHOST
671{
672 RTSEL uHostSelDS; /**< 0x00 */
673 RTSEL uHostSelES; /**< 0x02 */
674 RTSEL uHostSelFS; /**< 0x04 */
675 X86XDTR64 HostGdtr; /**< 0x06 - should be aligned by its 64-bit member. */
676 RTSEL uHostSelGS; /**< 0x10 */
677 RTSEL uHostSelTR; /**< 0x12 */
678 RTSEL uHostSelSS; /**< 0x14 - not restored, just for fetching */
679 X86XDTR64 HostGdtrRw; /**< 0x16 - should be aligned by its 64-bit member. */
680 RTSEL uHostSelCS; /**< 0x20 - not restored, just for fetching */
681 uint8_t abPadding1[4]; /**< 0x22 */
682 X86XDTR64 HostIdtr; /**< 0x26 - should be aligned by its 64-bit member. */
683 uint64_t uHostFSBase; /**< 0x30 */
684 uint64_t uHostGSBase; /**< 0x38 */
685} VMXRESTOREHOST;
686/** Pointer to VMXRESTOREHOST. */
687typedef VMXRESTOREHOST *PVMXRESTOREHOST;
688AssertCompileSize(X86XDTR64, 10);
689AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr.uAddr, 0x08);
690AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtrRw.uAddr, 0x18);
691AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr.uAddr, 0x28);
692AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 0x30);
693AssertCompileSize(VMXRESTOREHOST, 64);
694AssertCompileSizeAlignment(VMXRESTOREHOST, 8);
695
696/**
697 * VMX StartVM function.
698 *
699 * @returns VBox status code (no informational stuff).
700 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
701 * @param pVCpu Pointer to the cross context per-CPU structure.
702 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
703 */
704typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume));
705/** Pointer to a VMX StartVM function. */
706typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
707/** @} */
708
709/**
710 * HM VMCPU Instance data.
711 *
712 * Note! If you change members of this struct, make sure to check if the
713 * assembly counterpart in HMInternal.mac needs to be updated as well.
714 *
715 * Note! The members here are ordered and aligned based on estimated frequency of
716 * usage and grouped to fit within a cache line in hot code paths. Even subtle
717 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
718 * care.
719 */
720typedef struct HMCPU
721{
722 /** Set when the TLB has been checked until we return from the world switch. */
723 bool volatile fCheckedTLBFlush;
724 /** Set when we're using VT-x or AMD-V at that moment.
725 * @todo r=bird: Misleading description. For AMD-V this will be set the first
726 * time HMCanExecuteGuest() is called and only cleared again by
727 * HMR3ResetCpu(). For VT-x it will be set by HMCanExecuteGuest when we
728 * can execute something in VT-x mode, and cleared if we cannot.
729 *
730 * The field is much more about recording the last HMCanExecuteGuest
731 * return value than anything about any "moment". */
732 bool fActive;
733
734 /** Whether we should use the debug loop because single stepping is active or
735 * special debug breakpoints / events are armed. */
736 bool fUseDebugLoop;
737
738 /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
739 bool fGIMTrapXcptUD;
740 /** Whether \#GP needs to be intercepted for mesa driver workaround. */
741 bool fTrapXcptGpForLovelyMesaDrv;
742 /** Whether we're executing a single instruction. */
743 bool fSingleInstruction;
744
745 bool afAlignment0[2];
746
747 /** An additional error code used for some gurus. */
748 uint32_t u32HMError;
749 /** The last exit-to-ring-3 reason. */
750 int32_t rcLastExitToR3;
751 /** CPU-context changed flags (see HM_CHANGED_xxx). */
752 uint64_t fCtxChanged;
753
754 /** VT-x data. */
755 struct HMCPUVMX
756 {
757 /** @name Guest information.
758 * @{ */
759 /** Guest VMCS information shared with ring-3. */
760 VMXVMCSINFOSHARED VmcsInfo;
761 /** Nested-guest VMCS information shared with ring-3. */
762 VMXVMCSINFOSHARED VmcsInfoNstGst;
763 /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
764 * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs */
765 bool fSwitchedToNstGstVmcsCopyForRing3;
766 /** Whether the static guest VMCS controls have been merged with the
767 * nested-guest VMCS controls. */
768 bool fMergedNstGstCtls;
769 /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
770 bool fCopiedNstGstToShadowVmcs;
771 /** Whether flushing the TLB is required due to switching to/from the
772 * nested-guest. */
773 bool fSwitchedNstGstFlushTlb;
774 /** Alignment. */
775 bool afAlignment0[4];
776 /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
777 uint64_t u64GstMsrApicBase;
778 /** @} */
779
780 /** @name Error reporting and diagnostics.
781 * @{ */
782 /** VT-x error-reporting (mainly for ring-3 propagation). */
783 struct
784 {
785 RTCPUID idCurrentCpu;
786 RTCPUID idEnteredCpu;
787 RTHCPHYS HCPhysCurrentVmcs;
788 uint32_t u32VmcsRev;
789 uint32_t u32InstrError;
790 uint32_t u32ExitReason;
791 uint32_t u32GuestIntrState;
792 } LastError;
793 /** @} */
794 } vmx;
795
796 /** SVM data. */
797 struct HMCPUSVM
798 {
799 /** Whether to emulate long mode support for sysenter/sysexit as Intel CPUs
800 * do. This means intercepting \#UD to emulate the instructions in
801 * long-mode and to intercept reads and writes to the SYSENTER MSRs in order to
802 * preserve the upper 32 bits written to them (AMD will ignore and discard). */
803 bool fEmulateLongModeSysEnterExit;
804 uint8_t au8Alignment0[7];
805
806 /** Cache of the nested-guest's VMCB fields that we modify in order to run the
807 * nested-guest using AMD-V. This will be restored on \#VMEXIT. */
808 SVMNESTEDVMCBCACHE NstGstVmcbCache;
809 } svm;
810
811 /** Event injection state. */
812 HMEVENT Event;
813
814 /** Current shadow paging mode for updating CR4.
815 * @todo move later (@bugref{9217}). */
816 PGMMODE enmShadowMode;
817 uint32_t u32TemporaryPadding;
818
819 /** The PAE PDPEs used with Nested Paging (only valid when
820 * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
821 X86PDPE aPdpes[4];
822
823 /* These two come first because they are accessed from assembly and we don't
824 want to detail all the stats in the assembly version of this structure. */
825 STAMCOUNTER StatVmxWriteHostRip;
826 STAMCOUNTER StatVmxWriteHostRsp;
827 STAMCOUNTER StatVmxVmLaunch;
828 STAMCOUNTER StatVmxVmResume;
829
830 STAMPROFILEADV StatEntry;
831 STAMPROFILEADV StatPreExit;
832 STAMPROFILEADV StatExitHandling;
833 STAMPROFILEADV StatExitIO;
834 STAMPROFILEADV StatExitMovCRx;
835 STAMPROFILEADV StatExitXcptNmi;
836 STAMPROFILEADV StatExitVmentry;
837 STAMPROFILEADV StatImportGuestState;
838 STAMPROFILEADV StatExportGuestState;
839 STAMPROFILEADV StatLoadGuestFpuState;
840 STAMPROFILEADV StatInGC;
841 STAMPROFILEADV StatPoke;
842 STAMPROFILEADV StatSpinPoke;
843 STAMPROFILEADV StatSpinPokeFailed;
844
845 STAMCOUNTER StatInjectInterrupt;
846 STAMCOUNTER StatInjectXcpt;
847 STAMCOUNTER StatInjectReflect;
848 STAMCOUNTER StatInjectConvertDF;
849 STAMCOUNTER StatInjectInterpret;
850 STAMCOUNTER StatInjectReflectNPF;
851
852 STAMCOUNTER StatExitAll;
853 STAMCOUNTER StatNestedExitAll;
854 STAMCOUNTER StatExitShadowNM;
855 STAMCOUNTER StatExitGuestNM;
856 STAMCOUNTER StatExitShadowPF; /**< Misleading, currently used for MMIO \#PFs as well. */
857 STAMCOUNTER StatExitShadowPFEM;
858 STAMCOUNTER StatExitGuestPF;
859 STAMCOUNTER StatExitGuestUD;
860 STAMCOUNTER StatExitGuestSS;
861 STAMCOUNTER StatExitGuestNP;
862 STAMCOUNTER StatExitGuestTS;
863 STAMCOUNTER StatExitGuestOF;
864 STAMCOUNTER StatExitGuestGP;
865 STAMCOUNTER StatExitGuestDE;
866 STAMCOUNTER StatExitGuestDF;
867 STAMCOUNTER StatExitGuestBR;
868 STAMCOUNTER StatExitGuestAC;
869 STAMCOUNTER StatExitGuestACSplitLock;
870 STAMCOUNTER StatExitGuestDB;
871 STAMCOUNTER StatExitGuestMF;
872 STAMCOUNTER StatExitGuestBP;
873 STAMCOUNTER StatExitGuestXF;
874 STAMCOUNTER StatExitGuestXcpUnk;
875 STAMCOUNTER StatExitDRxWrite;
876 STAMCOUNTER StatExitDRxRead;
877 STAMCOUNTER StatExitCR0Read;
878 STAMCOUNTER StatExitCR2Read;
879 STAMCOUNTER StatExitCR3Read;
880 STAMCOUNTER StatExitCR4Read;
881 STAMCOUNTER StatExitCR8Read;
882 STAMCOUNTER StatExitCR0Write;
883 STAMCOUNTER StatExitCR2Write;
884 STAMCOUNTER StatExitCR3Write;
885 STAMCOUNTER StatExitCR4Write;
886 STAMCOUNTER StatExitCR8Write;
887 STAMCOUNTER StatExitRdmsr;
888 STAMCOUNTER StatExitWrmsr;
889 STAMCOUNTER StatExitClts;
890 STAMCOUNTER StatExitXdtrAccess;
891 STAMCOUNTER StatExitLmsw;
892 STAMCOUNTER StatExitIOWrite;
893 STAMCOUNTER StatExitIORead;
894 STAMCOUNTER StatExitIOStringWrite;
895 STAMCOUNTER StatExitIOStringRead;
896 STAMCOUNTER StatExitIntWindow;
897 STAMCOUNTER StatExitExtInt;
898 STAMCOUNTER StatExitHostNmiInGC;
899 STAMCOUNTER StatExitHostNmiInGCIpi;
900 STAMCOUNTER StatExitPreemptTimer;
901 STAMCOUNTER StatExitTprBelowThreshold;
902 STAMCOUNTER StatExitTaskSwitch;
903 STAMCOUNTER StatExitApicAccess;
904 STAMCOUNTER StatExitReasonNpf;
905
906 STAMCOUNTER StatNestedExitReasonNpf;
907
908 STAMCOUNTER StatFlushPage;
909 STAMCOUNTER StatFlushPageManual;
910 STAMCOUNTER StatFlushPhysPageManual;
911 STAMCOUNTER StatFlushTlb;
912 STAMCOUNTER StatFlushTlbNstGst;
913 STAMCOUNTER StatFlushTlbManual;
914 STAMCOUNTER StatFlushTlbWorldSwitch;
915 STAMCOUNTER StatNoFlushTlbWorldSwitch;
916 STAMCOUNTER StatFlushEntire;
917 STAMCOUNTER StatFlushAsid;
918 STAMCOUNTER StatFlushNestedPaging;
919 STAMCOUNTER StatFlushTlbInvlpgVirt;
920 STAMCOUNTER StatFlushTlbInvlpgPhys;
921 STAMCOUNTER StatTlbShootdown;
922 STAMCOUNTER StatTlbShootdownFlush;
923
924 STAMCOUNTER StatSwitchPendingHostIrq;
925 STAMCOUNTER StatSwitchTprMaskedIrq;
926 STAMCOUNTER StatSwitchGuestIrq;
927 STAMCOUNTER StatSwitchHmToR3FF;
928 STAMCOUNTER StatSwitchVmReq;
929 STAMCOUNTER StatSwitchPgmPoolFlush;
930 STAMCOUNTER StatSwitchDma;
931 STAMCOUNTER StatSwitchExitToR3;
932 STAMCOUNTER StatSwitchLongJmpToR3;
933 STAMCOUNTER StatSwitchMaxResumeLoops;
934 STAMCOUNTER StatSwitchHltToR3;
935 STAMCOUNTER StatSwitchApicAccessToR3;
936 STAMCOUNTER StatSwitchPreempt;
937 STAMCOUNTER StatSwitchNstGstVmexit;
938
939 STAMCOUNTER StatTscParavirt;
940 STAMCOUNTER StatTscOffset;
941 STAMCOUNTER StatTscIntercept;
942
943 STAMCOUNTER StatDRxArmed;
944 STAMCOUNTER StatDRxContextSwitch;
945 STAMCOUNTER StatDRxIoCheck;
946
947 STAMCOUNTER StatExportMinimal;
948 STAMCOUNTER StatExportFull;
949 STAMCOUNTER StatLoadGuestFpu;
950 STAMCOUNTER StatExportHostState;
951
952 STAMCOUNTER StatVmxCheckBadRmSelBase;
953 STAMCOUNTER StatVmxCheckBadRmSelLimit;
954 STAMCOUNTER StatVmxCheckBadRmSelAttr;
955 STAMCOUNTER StatVmxCheckBadV86SelBase;
956 STAMCOUNTER StatVmxCheckBadV86SelLimit;
957 STAMCOUNTER StatVmxCheckBadV86SelAttr;
958 STAMCOUNTER StatVmxCheckRmOk;
959 STAMCOUNTER StatVmxCheckBadSel;
960 STAMCOUNTER StatVmxCheckBadRpl;
961 STAMCOUNTER StatVmxCheckPmOk;
962
963 STAMCOUNTER StatVmxPreemptionRecalcingDeadline;
964 STAMCOUNTER StatVmxPreemptionRecalcingDeadlineExpired;
965 STAMCOUNTER StatVmxPreemptionReusingDeadline;
966 STAMCOUNTER StatVmxPreemptionReusingDeadlineExpired;
967
968#ifdef VBOX_WITH_STATISTICS
969 STAMCOUNTER aStatExitReason[MAX_EXITREASON_STAT];
970 STAMCOUNTER aStatNestedExitReason[MAX_EXITREASON_STAT];
971 STAMCOUNTER aStatInjectedIrqs[256];
972 STAMCOUNTER aStatInjectedXcpts[X86_XCPT_LAST + 1];
973#endif
974#ifdef HM_PROFILE_EXIT_DISPATCH
975 STAMPROFILEADV StatExitDispatch;
976#endif
977} HMCPU;
978/** Pointer to HM VMCPU instance data. */
979typedef HMCPU *PHMCPU;
980AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
981AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
982AssertCompileMemberAlignment(HMCPU, vmx, 8);
983AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfo, 8);
984AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfoNstGst, 8);
985AssertCompileMemberAlignment(HMCPU, svm, 8);
986AssertCompileMemberAlignment(HMCPU, Event, 8);
987
988
989/**
990 * HM per-VCpu ring-0 only instance data.
991 */
992typedef struct HMR0PERVCPU
993{
994 /** World switch exit counter. */
995 uint32_t volatile cWorldSwitchExits;
996 /** TLB flush count. */
997 uint32_t cTlbFlushes;
998 /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
999 RTCPUID idLastCpu;
1000 /** The CPU ID of the CPU currently owning the VMCS. Set in
1001 * HMR0Enter and cleared in HMR0Leave. */
1002 RTCPUID idEnteredCpu;
1003 /** Current ASID in use by the VM. */
1004 uint32_t uCurrentAsid;
1005
1006 /** Set if we need to flush the TLB during the world switch. */
1007 bool fForceTLBFlush;
1008 /** Whether we've completed the inner HM leave function. */
1009 bool fLeaveDone;
1010 /** Whether we're using the hyper DR7 or guest DR7. */
1011 bool fUsingHyperDR7;
1012 /** Whether we are currently executing in the debug loop.
1013 * Mainly for assertions. */
1014 bool fUsingDebugLoop;
1015 /** Set if we are using the debug loop and wish to intercept RDTSC. */
1016 bool fDebugWantRdTscExit;
1017 /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
1018 * execution. */
1019 bool fLoadSaveGuestXcr0;
1020 /** Set if we need to clear the trap flag because of single stepping. */
1021 bool fClearTrapFlag;
1022
1023 bool afPadding1[1];
1024 /** World switcher flags (HM_WSF_XXX - was CPUMCTX::fWorldSwitcher in 6.1). */
1025 uint32_t fWorldSwitcher;
1026 /** The raw host TSC value from the last VM exit (set by HMR0A.asm). */
1027 uint64_t uTscExit;
1028
1029 /** VT-x data. */
1030 struct HMR0CPUVMX
1031 {
1032 /** Ring-0 pointer to the hardware-assisted VMX execution function. */
1033 PFNHMVMXSTARTVM pfnStartVm;
1034 /** Absolute TSC deadline. */
1035 uint64_t uTscDeadline;
1036 /** The deadline version number. */
1037 uint64_t uTscDeadlineVersion;
1038
1039 /** @name Guest information.
1040 * @{ */
1041 /** Guest VMCS information. */
1042 VMXVMCSINFO VmcsInfo;
1043 /** Nested-guest VMCS information. */
1044 VMXVMCSINFO VmcsInfoNstGst;
1045 /** Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
1046 * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3 */
1047 bool fSwitchedToNstGstVmcs;
1048 bool afAlignment0[7];
1049 /** @} */
1050
1051 /** @name Host information.
1052 * @{ */
1053 /** Host LSTAR MSR to restore lazily while leaving VT-x. */
1054 uint64_t u64HostMsrLStar;
1055 /** Host STAR MSR to restore lazily while leaving VT-x. */
1056 uint64_t u64HostMsrStar;
1057 /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
1058 uint64_t u64HostMsrSfMask;
1059 /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
1060 uint64_t u64HostMsrKernelGsBase;
1061 /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
1062 uint32_t fLazyMsrs;
1063 /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
1064 bool fUpdatedHostAutoMsrs;
1065 /** Alignment. */
1066 uint8_t au8Alignment0[3];
1067 /** Which host-state bits to restore before being preempted, see
1068 * VMX_RESTORE_HOST_XXX. */
1069 uint32_t fRestoreHostFlags;
1070 /** Alignment. */
1071 uint32_t u32Alignment0;
1072 /** The host-state restoration structure. */
1073 VMXRESTOREHOST RestoreHost;
1074 /** @} */
1075 } vmx;
1076
1077 /** SVM data. */
1078 struct HMR0CPUSVM
1079 {
1080 /** Ring-0 handler for AMD-V (the SVM VMRun function). */
1081 PFNHMSVMVMRUN pfnVMRun;
1082
1083 /** Physical address of the host VMCB which holds additional host-state. */
1084 RTHCPHYS HCPhysVmcbHost;
1085 /** R0 memory object for the host VMCB which holds additional host-state. */
1086 RTR0MEMOBJ hMemObjVmcbHost;
1087
1088 /** Physical address of the guest VMCB. */
1089 RTHCPHYS HCPhysVmcb;
1090 /** R0 memory object for the guest VMCB. */
1091 RTR0MEMOBJ hMemObjVmcb;
1092 /** Pointer to the guest VMCB. */
1093 R0PTRTYPE(PSVMVMCB) pVmcb;
1094
1095 /** Physical address of the MSR bitmap (8 KB). */
1096 RTHCPHYS HCPhysMsrBitmap;
1097 /** R0 memory object for the MSR bitmap (8 KB). */
1098 RTR0MEMOBJ hMemObjMsrBitmap;
1099 /** Pointer to the MSR bitmap. */
1100 R0PTRTYPE(void *) pvMsrBitmap;
1101
1102 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
1103 * we should check if the VTPR changed on every VM-exit. */
1104 bool fSyncVTpr;
1105 bool afAlignment[7];
1106
1107 /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
1108 uint64_t u64HostTscAux;
1109
1110 /** For saving stack space, the disassembler state is allocated here
1111 * instead of on the stack. */
1112 DISCPUSTATE DisState;
1113 } svm;
1114} HMR0PERVCPU;
1115/** Pointer to HM ring-0 VMCPU instance data. */
1116typedef HMR0PERVCPU *PHMR0PERVCPU;
1117AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
1118AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4);
1119AssertCompileMemberAlignment(HMR0PERVCPU, vmx.RestoreHost, 8);
1120
1121
1122/** @name HM_WSF_XXX - @bugref{9453}, @bugref{9087}
1123 * @note If you change these values don't forget to update the assembly
1124 * defines as well!
1125 * @{ */
1126/** Touch IA32_PRED_CMD.IBPB on VM exit. */
1127#define HM_WSF_IBPB_EXIT RT_BIT_32(0)
1128/** Touch IA32_PRED_CMD.IBPB on VM entry. */
1129#define HM_WSF_IBPB_ENTRY RT_BIT_32(1)
1130/** Touch IA32_FLUSH_CMD.L1D on VM entry. */
1131#define HM_WSF_L1D_ENTRY RT_BIT_32(2)
1132/** Flush MDS buffers on VM entry. */
1133#define HM_WSF_MDS_ENTRY RT_BIT_32(3)
1134
1135/** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
1136#define HM_WSF_L1D_SCHED RT_BIT_32(16)
1137/** Flush MDS buffers on VM scheduling. */
1138#define HM_WSF_MDS_SCHED RT_BIT_32(17)
1139/** @} */
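
/*
 * Illustrative sketch only (not the actual ring-0 init code, and the exact
 * member paths from VM/VMCPU are assumptions): the boolean configuration
 * fields in HM documented as "Processed into HMR0PERVCPU::fWorldSwitcher"
 * map onto these bits roughly like this:
 *
 * @code
 *    uint32_t fWsf = 0;
 *    if (pVM->hm.s.fIbpbOnVmExit)      fWsf |= HM_WSF_IBPB_EXIT;
 *    if (pVM->hm.s.fIbpbOnVmEntry)     fWsf |= HM_WSF_IBPB_ENTRY;
 *    if (pVM->hm.s.fL1dFlushOnVmEntry) fWsf |= HM_WSF_L1D_ENTRY;
 *    if (pVM->hm.s.fMdsClearOnVmEntry) fWsf |= HM_WSF_MDS_ENTRY;
 *    if (pVM->hm.s.fL1dFlushOnSched)   fWsf |= HM_WSF_L1D_SCHED;
 *    if (pVM->hm.s.fMdsClearOnSched)   fWsf |= HM_WSF_MDS_SCHED;
 *    pVCpu->hmr0.s.fWorldSwitcher = fWsf;
 * @endcode
 */
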
1140
1141
1142#ifdef IN_RING0
1143extern bool g_fHmVmxSupported;
1144extern uint32_t g_fHmHostKernelFeatures;
1145extern uint32_t g_uHmMaxAsid;
1146extern bool g_fHmVmxUsePreemptTimer;
1147extern uint8_t g_cHmVmxPreemptTimerShift;
1148extern bool g_fHmVmxSupportsVmcsEfer;
1149extern uint64_t g_uHmVmxHostCr4;
1150extern uint64_t g_uHmVmxHostMsrEfer;
1151extern uint64_t g_uHmVmxHostSmmMonitorCtl;
1152extern bool g_fHmSvmSupported;
1153extern uint32_t g_uHmSvmRev;
1154extern uint32_t g_fHmSvmFeatures;
1155
1156extern SUPHWVIRTMSRS g_HmMsrs;
1157
1158
1159VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
1160VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPUCC pVCpu);
1161
1162# ifdef VBOX_STRICT
1163# define HM_DUMP_REG_FLAGS_GPRS RT_BIT(0)
1164# define HM_DUMP_REG_FLAGS_FPU RT_BIT(1)
1165# define HM_DUMP_REG_FLAGS_MSRS RT_BIT(2)
1166# define HM_DUMP_REG_FLAGS_ALL (HM_DUMP_REG_FLAGS_GPRS | HM_DUMP_REG_FLAGS_FPU | HM_DUMP_REG_FLAGS_MSRS)
1167
1168VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPUCC pVCpu, uint32_t fFlags);
1169VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
1170# endif
1171
1172DECLASM(void) hmR0MdsClear(void);
1173#endif /* IN_RING0 */
1174
1175
1176/** @addtogroup grp_hm_int_svm SVM Internal
1177 * @{ */
1178VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCC pVM, PVMCPUCC pVCpu);
1179
1180/**
1181 * Prepares for and executes VMRUN (64-bit register context).
1182 *
1183 * @returns VBox status code (no informational stuff).
1184 * @param pVM The cross context VM structure. (Not used.)
1185 * @param pVCpu The cross context virtual CPU structure.
1186 * @param HCPhyspVMCB Physical address of the VMCB.
1187 *
1188 * @remarks With spectre mitigations and the usual need for speed (/ micro
1189 * optimizations), we have a bunch of variations of this code depending
1190 * on a few precoditions. In release builds, the code is entirely
1191 * without conditionals. Debug builds have a couple of assertions that
1192 * shouldn't ever be triggered.
1193 *
1194 * @{
1195 */
1196DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1197DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1198DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1199DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1200DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1201DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1202DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1203DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1204/** @} */
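
/*
 * Illustrative sketch only: ring-0 picks exactly one of the variants declared
 * above and caches it in HMR0PERVCPU::svm.pfnVMRun.  The selector below is a
 * simplified placeholder (hmR0SvmSelectVmRunSketch is not a real function); it
 * keys off whether XCR0 must be swapped and which HM_WSF_IBPB_XXX bits are set.
 *
 * @code
 *    static PFNHMSVMVMRUN hmR0SvmSelectVmRunSketch(bool fWithXcr0, uint32_t fWorldSwitcher)
 *    {
 *        bool const fIbpbEntry = RT_BOOL(fWorldSwitcher & HM_WSF_IBPB_ENTRY);
 *        bool const fIbpbExit  = RT_BOOL(fWorldSwitcher & HM_WSF_IBPB_EXIT);
 *        if (!fIbpbEntry && !fIbpbExit)
 *            return fWithXcr0 ? hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit
 *                             : hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit;
 *        if (fIbpbEntry && !fIbpbExit)
 *            return fWithXcr0 ? hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit
 *                             : hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit;
 *        if (!fIbpbEntry && fIbpbExit)
 *            return fWithXcr0 ? hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit
 *                             : hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit;
 *        return fWithXcr0 ? hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit
 *                         : hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit;
 *    }
 * @endcode
 */
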
1205
1206/** @} */
1207
1208
1209/** @addtogroup grp_hm_int_vmx VMX Internal
1210 * @{ */
1211VMM_INT_DECL(PVMXVMCSINFOSHARED) hmGetVmxActiveVmcsInfoShared(PVMCPUCC pVCpu);
1212
1213/**
1214 * Used on platforms with poor inline assembly support to retrieve all the
1215 * info from the CPU and put it in the @a pRestoreHost structure.
1216 */
1217DECLASM(void) hmR0VmxExportHostSegmentRegsAsmHlp(PVMXRESTOREHOST pRestoreHost, bool fHaveFsGsBase);
1218
1219/**
1220 * Restores some host-state fields that need not be done on every VM-exit.
1221 *
1222 * @returns VBox status code.
1223 * @param fRestoreHostFlags Flags of which host registers need to be
1224 * restored.
1225 * @param pRestoreHost Pointer to the host-restore structure.
1226 */
1227DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
1228
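
/*
 * Illustrative sketch only (the function name and the exact member paths from
 * VMCPU are assumptions): a typical caller uses the single-comparison trick
 * documented with the VMX_RESTORE_HOST_XXX flags before handing the flags and
 * the VMXRESTOREHOST structure to VMXRestoreHostState.
 *
 * @code
 *    static void hmR0VmxRestoreHostStateSketch(PVMCPUCC pVCpu)
 *    {
 *        uint32_t const fFlags = pVCpu->hmr0.s.vmx.fRestoreHostFlags;
 *        if (fFlags > VMX_RESTORE_HOST_REQUIRED) // REQUIRED is the topmost bit, so this means "REQUIRED plus something else"
 *        {
 *            VMXRestoreHostState(fFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
 *            pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
 *        }
 *    }
 * @endcode
 */
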
1229/**
1230 * VMX StartVM functions.
1231 *
1232 * @returns VBox status code (no informational stuff).
1233 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
1234 * @param pVCpu Pointer to the cross context per-CPU structure of the
1235 * calling EMT.
1236 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
1237 *
1238 * @remarks With spectre mitigations and the usual need for speed (/ micro
1239 * optimizations), we have a bunch of variations of this code depending
1240 * on a few precoditions. In release builds, the code is entirely
1241 * without conditionals. Debug builds have a couple of assertions that
1242 * shouldn't ever be triggered.
1243 *
1244 * @{
1245 */
1246DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1247DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1248DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1249DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1250DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1251DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1252DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1253DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1254DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1255DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1256DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1257DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1258DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1259DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1260DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1261DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1262DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1263DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1264DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1265DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1266DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1267DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1268DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1269DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1270DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1271DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1272DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1273DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1274DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1275DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1276DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1277DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1278/** @} */
1279
1280/** @} */
1281
1282/** @} */
1283
1284RT_C_DECLS_END
1285
1286#endif /* !VMM_INCLUDED_SRC_include_HMInternal_h */
1287