VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@97405

Last change on this file since 97405 was 97069, checked in by vboxsync, 2 years ago

VMM/HMVMXR0: Working on streamlining CPU state importing from the VMCS. This does cause quite some code bloat (release linux from 93950 to 132120 text bytes), but it is hopefully worth it. This should also provide some basis for addressing the @todo in nemR3DarwinHandleExitCommon (NEM/darwin) where the code imports the entire state for every exit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 56.3 KB
1/* $Id: HMInternal.h 97069 2022-10-10 15:03:10Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_HMInternal_h
29#define VMM_INCLUDED_SRC_include_HMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/types.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/dis.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include <VBox/vmm/hm_svm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/cpum.h>
43#include <VBox/vmm/trpm.h>
44#include <iprt/memobj.h>
45#include <iprt/cpuset.h>
46#include <iprt/mp.h>
47#include <iprt/avl.h>
48#include <iprt/string.h>
49
50#include "VMXInternal.h"
51#include "SVMInternal.h"
52
53#if HC_ARCH_BITS == 32
54# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
55#endif
56
57/** @def HM_PROFILE_EXIT_DISPATCH
58 * Enables profiling of the VM exit handler dispatching. */
59#if 0 || defined(DOXYGEN_RUNNING)
60# define HM_PROFILE_EXIT_DISPATCH
61#endif
62
63RT_C_DECLS_BEGIN
64
65
66/** @defgroup grp_hm_int Internal
67 * @ingroup grp_hm
68 * @internal
69 * @{
70 */
71
72/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
73#define HM_EPT_IDENTITY_PG_TABLE_SIZE HOST_PAGE_SIZE
74/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
75#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * X86_PAGE_SIZE + 1)
76/** Total guest mapped memory needed. */
77#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
78
79
80/** @name Macros for enabling and disabling preemption.
81 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
82 * preemption has already been disabled when there is no context hook.
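 *
 * A minimal usage sketch (illustrative only, not lifted from the sources):
 * @code
 *     HM_DISABLE_PREEMPT(pVCpu);
 *     ... code that must not be preempted ...
 *     HM_RESTORE_PREEMPT();
 * @endcode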
83 * @{ */
84#ifdef VBOX_STRICT
85# define HM_DISABLE_PREEMPT(a_pVCpu) \
86 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
87 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled((a_pVCpu))); \
88 RTThreadPreemptDisable(&PreemptStateInternal)
89#else
90# define HM_DISABLE_PREEMPT(a_pVCpu) \
91 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
92 RTThreadPreemptDisable(&PreemptStateInternal)
93#endif /* VBOX_STRICT */
94#define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)
95/** @} */
96
97
98/** @name HM saved state versions.
99 * @{
100 */
101#define HM_SAVED_STATE_VERSION HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
102#define HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT 6
103#define HM_SAVED_STATE_VERSION_TPR_PATCHING 5
104#define HM_SAVED_STATE_VERSION_NO_TPR_PATCHING 4
105#define HM_SAVED_STATE_VERSION_2_0_X 3
106/** @} */
107
108
109/**
110 * HM physical (host) CPU information.
111 */
112typedef struct HMPHYSCPU
113{
114 /** The CPU ID. */
115 RTCPUID idCpu;
116 /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
117 RTR0MEMOBJ hMemObj;
118 /** The physical address of the first page in hMemObj (it's a
119 * physically contiguous allocation if it spans multiple pages). */
120 RTHCPHYS HCPhysMemObj;
121 /** The address of the memory (for pfnEnable). */
122 void *pvMemObj;
123 /** Current ASID (AMD-V) / VPID (Intel). */
124 uint32_t uCurrentAsid;
125 /** TLB flush count. */
126 uint32_t cTlbFlushes;
127 /** Whether to flush each new ASID/VPID before use. */
128 bool fFlushAsidBeforeUse;
129 /** Configured for VT-x or AMD-V. */
130 bool fConfigured;
131 /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
132 bool fIgnoreAMDVInUseError;
133 /** Whether CR4.VMXE was already enabled prior to us enabling it. */
134 bool fVmxeAlreadyEnabled;
135 /** In use by our code. (for power suspend) */
136 bool volatile fInUse;
137#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
138 /** Nested-guest union (put data common to SVM/VMX outside the union). */
139 union
140 {
141 /** Nested-guest SVM data. */
142 struct
143 {
144 /** The active nested-guest MSR permission bitmap memory backing. */
145 RTR0MEMOBJ hNstGstMsrpm;
146 /** The physical address of the first page in hNstGstMsrpm (physically
147 * contiguous allocation). */
148 RTHCPHYS HCPhysNstGstMsrpm;
149 /** The address of the active nested-guest MSRPM. */
150 void *pvNstGstMsrpm;
151 } svm;
152 /** @todo Nested-VMX. */
153 } n;
154#endif
155} HMPHYSCPU;
156/** Pointer to HMPHYSCPU struct. */
157typedef HMPHYSCPU *PHMPHYSCPU;
158/** Pointer to a const HMPHYSCPU struct. */
159typedef const HMPHYSCPU *PCHMPHYSCPU;
160
161/**
162 * TPR-instruction type.
163 */
164typedef enum
165{
166 HMTPRINSTR_INVALID,
167 HMTPRINSTR_READ,
168 HMTPRINSTR_READ_SHR4,
169 HMTPRINSTR_WRITE_REG,
170 HMTPRINSTR_WRITE_IMM,
171 HMTPRINSTR_JUMP_REPLACEMENT,
172 /** The usual 32-bit paranoia. */
173 HMTPRINSTR_32BIT_HACK = 0x7fffffff
174} HMTPRINSTR;
175
176/**
177 * TPR patch information.
178 */
179typedef struct
180{
181 /** The key is the address of the patched instruction. (32-bit GC pointer) */
182 AVLOU32NODECORE Core;
183 /** Original opcode. */
184 uint8_t aOpcode[16];
185 /** Instruction size. */
186 uint32_t cbOp;
187 /** Replacement opcode. */
188 uint8_t aNewOpcode[16];
189 /** Replacement instruction size. */
190 uint32_t cbNewOp;
191 /** Instruction type. */
192 HMTPRINSTR enmType;
193 /** Source operand. */
194 uint32_t uSrcOperand;
195 /** Destination operand. */
196 uint32_t uDstOperand;
197 /** Number of times the instruction caused a fault. */
198 uint32_t cFaults;
199 /** Patch address of the jump replacement. */
200 RTGCPTR32 pJumpTarget;
201} HMTPRPATCH;
202/** Pointer to HMTPRPATCH. */
203typedef HMTPRPATCH *PHMTPRPATCH;
204/** Pointer to a const HMTPRPATCH. */
205typedef const HMTPRPATCH *PCHMTPRPATCH;
206
207
208/**
209 * Makes a HMEXITSTAT::uKey value from a program counter and an exit code.
210 *
211 * @returns 64-bit key
212 * @param a_uPC The RIP + CS.BASE value of the exit.
213 * @param a_uExit The exit code.
214 * @todo Add CPL?
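 *
 * Example (illustrative): HMEXITSTAT_MAKE_KEY(UINT64_C(0xfedcba98), 0x1f)
 * yields UINT64_C(0x001f0000fedcba98), i.e. the exit code lands in the top
 * 16 bits and the low 48 bits of the PC fill the rest.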
215 */
216#define HMEXITSTAT_MAKE_KEY(a_uPC, a_uExit) (((a_uPC) & UINT64_C(0x0000ffffffffffff)) | (uint64_t)(a_uExit) << 48)
217
218typedef struct HMEXITINFO
219{
220 /** See HMEXITSTAT_MAKE_KEY(). */
221 uint64_t uKey;
222 /** Number of recent hits (depreciates with time). */
223 uint32_t volatile cHits;
224 /** The age + lock. */
225 uint16_t volatile uAge;
226 /** Action or action table index. */
227 uint16_t iAction;
228} HMEXITINFO;
229AssertCompileSize(HMEXITINFO, 16); /* Lots of these guys, so don't add any unnecessary stuff! */
230
231typedef struct HMEXITHISTORY
232{
233 /** The exit timestamp. */
234 uint64_t uTscExit;
235 /** The index of the corresponding HMEXITINFO entry.
236 * UINT32_MAX if none (too many collisions, race, whatever). */
237 uint32_t iExitInfo;
238 /** Figure out later, needed for padding now. */
239 uint32_t uSomeClueOrSomething;
240} HMEXITHISTORY;
241
242/**
243 * Switcher function, HC to the special 64-bit RC.
244 *
245 * @param pVM The cross context VM structure.
246 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
247 * @returns Return code indicating the action to take.
248 */
249typedef DECLCALLBACKTYPE(int, FNHMSWITCHERHC,(PVM pVM, uint32_t offCpumVCpu));
250/** Pointer to switcher function. */
251typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
252
253
254/**
255 * HM VM Instance data.
256 * Changes to this must be checked against the padding of the hm union in VM!
257 */
258typedef struct HM
259{
260 /** Set when the debug facility has breakpoints/events enabled that require
261 * us to use the debug execution loop in ring-0. */
262 bool fUseDebugLoop;
263 /** Set when TPR patching is allowed. */
264 bool fTprPatchingAllowed;
265 /** Set when TPR patching is active. */
266 bool fTprPatchingActive;
267 /** Alignment padding. */
268 bool afAlignment1[5];
269
270 struct
271 {
272 /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */
273 bool fSupported;
274 /** Set when we've enabled VMX. */
275 bool fEnabled;
276 /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
277 uint8_t cPreemptTimerShift;
278 bool fAlignment1;
279
280 /** @name Configuration (gets copied if problematic)
281 * @{ */
282 /** Set if Last Branch Record (LBR) is enabled. */
283 bool fLbrCfg;
284 /** Set if VT-x VPID is allowed. */
285 bool fAllowVpid;
286 /** Set if unrestricted guest execution is in use (real and protected mode
287 * without paging). */
288 bool fUnrestrictedGuestCfg;
289 /** Set if the preemption timer should be used if available. Ring-0
290 * quietly clears this if the hardware doesn't support the preemption timer. */
291 bool fUsePreemptTimerCfg;
292 /** @} */
293
294 /** Pause-loop exiting (PLE) gap in ticks. */
295 uint32_t cPleGapTicks;
296 /** Pause-loop exiting (PLE) window in ticks. */
297 uint32_t cPleWindowTicks;
298
299 /** Virtual address of the TSS page used for real mode emulation. */
300 R3PTRTYPE(PVBOXTSS) pRealModeTSS;
301 /** Virtual address of the identity page table used for real mode and protected
302 * mode without paging emulation in EPT mode. */
303 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
304 } vmx;
305
306 struct
307 {
308 /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */
309 bool fSupported;
310 /** Set when we've enabled SVM. */
311 bool fEnabled;
312 /** Set when the hack to ignore VERR_SVM_IN_USE is active.
313 * @todo Safe? */
314 bool fIgnoreInUseError;
315 /** Whether to use virtualized VMSAVE/VMLOAD feature. */
316 bool fVirtVmsaveVmload;
317 /** Whether to use virtual GIF feature. */
318 bool fVGif;
319 /** Whether to use LBR virtualization feature. */
320 bool fLbrVirt;
321 bool afAlignment1[2];
322
323 /** Pause filter counter. */
324 uint16_t cPauseFilter;
325 /** Pause filter threshold in ticks. */
326 uint16_t cPauseFilterThresholdTicks;
327 uint32_t u32Alignment2;
328 } svm;
329
330 /** AVL tree with all patches (active or disabled) sorted by guest instruction address.
331 * @todo For @bugref{9217} this AVL tree must be eliminated and instead
332 * sort aPatches by address and do a safe binary search on it. */
333 AVLOU32TREE PatchTree;
334 uint32_t cPatches;
335 HMTPRPATCH aPatches[64];
336
337 /** Guest allocated memory for patching purposes. */
338 RTGCPTR pGuestPatchMem;
339 /** Current free pointer inside the patch block. */
340 RTGCPTR pFreeGuestPatchMem;
341 /** Size of the guest patch memory block. */
342 uint32_t cbGuestPatchMem;
343 uint32_t u32Alignment2;
344
345 /** For ring-3 use only. */
346 struct
347 {
348 /** Last recorded error code during HM ring-0 init. */
349 int32_t rcInit;
350 uint32_t u32Alignment3;
351
352 /** Maximum ASID allowed.
353 * This is mainly for the release log. */
354 uint32_t uMaxAsid;
355 /** World switcher flags (HM_WSF_XXX) for the release log. */
356 uint32_t fWorldSwitcher;
357
358 struct
359 {
360 /** Set if VPID is supported (ring-3 copy). */
361 bool fVpid;
362 /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
363 * init, for logging). */
364 bool fSupportsVmcsEfer;
365 /** Whether to use VMCS shadowing. */
366 bool fUseVmcsShadowing;
367 bool fAlignment2;
368
369 /** Host CR4 value (set by ring-0 VMX init, for logging). */
370 uint64_t u64HostCr4;
371 /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
372 uint64_t u64HostSmmMonitorCtl;
373 /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
374 uint64_t u64HostMsrEfer;
375 /** Host IA32_FEATURE_CONTROL MSR (set by ring-0 VMX init, for logging). */
376 uint64_t u64HostFeatCtrl;
377
378 /** The first valid host LBR branch-from-IP stack range. */
379 uint32_t idLbrFromIpMsrFirst;
380 /** The last valid host LBR branch-from-IP stack range. */
381 uint32_t idLbrFromIpMsrLast;
382
383 /** The first valid host LBR branch-to-IP stack range. */
384 uint32_t idLbrToIpMsrFirst;
385 /** The last valid host LBR branch-to-IP stack range. */
386 uint32_t idLbrToIpMsrLast;
387
388 /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
389 RTHCPHYS HCPhysVmxEnableError;
390 /** VMX MSR values (only for ring-3 consumption). */
391 VMXMSRS Msrs;
392
393 /** Tagged-TLB flush type (only for ring-3 consumption). */
394 VMXTLBFLUSHTYPE enmTlbFlushType;
395 /** Flush type to use for INVEPT (only for ring-3 consumption). */
396 VMXTLBFLUSHEPT enmTlbFlushEpt;
397 /** Flush type to use for INVVPID (only for ring-3 consumption). */
398 VMXTLBFLUSHVPID enmTlbFlushVpid;
399 } vmx;
400
401 struct
402 {
403 /** SVM revision. */
404 uint32_t u32Rev;
405 /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
406 uint32_t fFeatures;
407 /** HWCR MSR (for diagnostics). */
408 uint64_t u64MsrHwcr;
409 } svm;
410 } ForR3;
411
412 /** @name Configuration not used (much) after VM setup
413 * @{ */
414 /** The maximum number of resume loops allowed in ring-0 (safety precaution).
415 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
416 uint32_t cMaxResumeLoopsCfg;
417 /** Set if nested paging is enabled.
418 * Config value that is copied to HMR0PERVM::fNestedPaging on setup. */
419 bool fNestedPagingCfg;
420 /** Set if large pages are enabled (requires nested paging).
421 * Config only, passed on to PGM where it really belongs.
422 * @todo move to PGM */
423 bool fLargePages;
424 /** Set if we can support 64-bit guests or not.
425 * Config value that is copied to HMR0PERVM::fAllow64BitGuests on setup. */
426 bool fAllow64BitGuestsCfg;
427 /** Set when we initialize VT-x or AMD-V once for all CPUs. */
428 bool fGlobalInit;
429 /** Set if hardware APIC virtualization is enabled.
430 * @todo Not really used by HM, move to APIC where it's actually used. */
431 bool fVirtApicRegs;
432 /** Set if posted interrupt processing is enabled.
433 * @todo Not really used by HM, move to APIC where it's actually used. */
434 bool fPostedIntrs;
435 /** VM needs workaround for missing TLB flush in OS/2, see ticketref:20625.
436 * @note Currently only heeded by AMD-V. */
437 bool fMissingOS2TlbFlushWorkaround;
438 /** @} */
439
440 /** @name Processed into HMR0PERVCPU::fWorldSwitcher by ring-0 on VM init.
441 * @{ */
442 /** Set if indirect branch prediction barrier on VM exit. */
443 bool fIbpbOnVmExit;
444 /** Set if indirect branch prediction barrier on VM entry. */
445 bool fIbpbOnVmEntry;
446 /** Set if level 1 data cache should be flushed on VM entry. */
447 bool fL1dFlushOnVmEntry;
448 /** Set if level 1 data cache should be flushed on EMT scheduling. */
449 bool fL1dFlushOnSched;
450 /** Set if MDS related buffers should be cleared on VM entry. */
451 bool fMdsClearOnVmEntry;
452 /** Set if MDS related buffers should be cleared on EMT scheduling. */
453 bool fMdsClearOnSched;
454 /** Set if host manages speculation control settings.
455 * @todo doesn't do anything ... */
456 bool fSpecCtrlByHost;
457 /** @} */
458
459 /** Set when we've finalized the VMX / SVM initialization in ring-3
460 * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */
461 bool fInitialized;
462
463 bool afAlignment2[5];
464
465 STAMCOUNTER StatTprPatchSuccess;
466 STAMCOUNTER StatTprPatchFailure;
467 STAMCOUNTER StatTprReplaceSuccessCr8;
468 STAMCOUNTER StatTprReplaceSuccessVmc;
469 STAMCOUNTER StatTprReplaceFailure;
470} HM;
471/** Pointer to HM VM instance data. */
472typedef HM *PHM;
473AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
474AssertCompileMemberAlignment(HM, vmx, 8);
475AssertCompileMemberAlignment(HM, svm, 8);
476AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
477AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
478
479
480/**
481 * Per-VM ring-0 instance data for HM.
482 */
483typedef struct HMR0PERVM
484{
485 /** The maximum number of resume loops allowed in ring-0 (safety precaution).
486 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
487 uint32_t cMaxResumeLoops;
488
489 /** Set if nested paging is enabled. */
490 bool fNestedPaging;
491 /** Set if we can support 64-bit guests or not. */
492 bool fAllow64BitGuests;
493 bool afAlignment1[1];
494
495 /** AMD-V specific data. */
496 struct HMR0SVMVM
497 {
498 /** Set if erratum 170 affects the AMD CPU. */
499 bool fAlwaysFlushTLB;
500 } svm;
501
502 /** VT-x specific data. */
503 struct HMR0VMXVM
504 {
505 /** Set if unrestricted guest execution is in use (real and protected mode
506 * without paging). */
507 bool fUnrestrictedGuest;
508 /** Set if the preemption timer is in use. */
509 bool fUsePreemptTimer;
510 /** Whether to use VMCS shadowing. */
511 bool fUseVmcsShadowing;
512 /** Set if Last Branch Record (LBR) is enabled. */
513 bool fLbr;
514 bool afAlignment2[3];
515
516 /** Set if VPID is supported (copy in HM::vmx::fVpidForRing3). */
517 bool fVpid;
518 /** Tagged-TLB flush type. */
519 VMXTLBFLUSHTYPE enmTlbFlushType;
520 /** Flush type to use for INVEPT. */
521 VMXTLBFLUSHEPT enmTlbFlushEpt;
522 /** Flush type to use for INVVPID. */
523 VMXTLBFLUSHVPID enmTlbFlushVpid;
524
525 /** The host LBR TOS (top-of-stack) MSR id. */
526 uint32_t idLbrTosMsr;
527
528 /** The first valid host LBR branch-from-IP stack range. */
529 uint32_t idLbrFromIpMsrFirst;
530 /** The last valid host LBR branch-from-IP stack range. */
531 uint32_t idLbrFromIpMsrLast;
532
533 /** The first valid host LBR branch-to-IP stack range. */
534 uint32_t idLbrToIpMsrFirst;
535 /** The last valid host LBR branch-to-IP stack range. */
536 uint32_t idLbrToIpMsrLast;
537
538 /** Pointer to the VMREAD bitmap. */
539 R0PTRTYPE(void *) pvVmreadBitmap;
540 /** Pointer to the VMWRITE bitmap. */
541 R0PTRTYPE(void *) pvVmwriteBitmap;
542
543 /** Pointer to the shadow VMCS read-only fields array. */
544 R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
545 /** Pointer to the shadow VMCS read/write fields array. */
546 R0PTRTYPE(uint32_t *) paShadowVmcsFields;
547 /** Number of elements in the shadow VMCS read-only fields array. */
548 uint32_t cShadowVmcsRoFields;
549 /** Number of elements in the shadow VMCS read-write fields array. */
550 uint32_t cShadowVmcsFields;
551
552 /** Host-physical address of the APIC-access page. */
553 RTHCPHYS HCPhysApicAccess;
554 /** Host-physical address of the VMREAD bitmap. */
555 RTHCPHYS HCPhysVmreadBitmap;
556 /** Host-physical address of the VMWRITE bitmap. */
557 RTHCPHYS HCPhysVmwriteBitmap;
558
559#ifdef VBOX_WITH_CRASHDUMP_MAGIC
560 /** Host-physical address of the crash-dump scratch area. */
561 RTHCPHYS HCPhysScratch;
562 /** Pointer to the crash-dump scratch bitmap. */
563 R0PTRTYPE(uint8_t *) pbScratch;
564#endif
565
566 /** Ring-0 memory object for per-VM VMX structures. */
567 RTR0MEMOBJ hMemObj;
568 /** Virtual address of the APIC-access page (not used). */
569 R0PTRTYPE(uint8_t *) pbApicAccess;
570 } vmx;
571} HMR0PERVM;
572/** Pointer to HM's per-VM ring-0 instance data. */
573typedef HMR0PERVM *PHMR0PERVM;
574
575
576/** @addtogroup grp_hm_int_svm SVM Internal
577 * @{ */
578/** SVM VMRun function, see SVMR0VMRun(). */
579typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
580/** Pointer to a SVM VMRun function. */
581typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
582
583/**
584 * SVM nested-guest VMCB cache.
585 *
586 * Contains VMCB fields from the nested-guest VMCB before they're modified by
587 * SVM R0 code for hardware-assisted SVM execution of a nested-guest.
588 *
589 * A VMCB field needs to be cached when it needs to be modified for execution using
590 * hardware-assisted SVM and any of the following are true:
591 * - If the original field needs to be inspected during execution of the
592 * nested-guest or \#VMEXIT processing.
593 * - If the field is written back to memory on \#VMEXIT by the physical CPU.
594 *
595 * A VMCB field needs to be restored only when the field is written back to
596 * memory on \#VMEXIT by the physical CPU and thus would be visible to the
597 * guest.
598 *
599 * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
600 * this structure.
601 */
602typedef struct SVMNESTEDVMCBCACHE
603{
604 /** Cache of CRX read intercepts. */
605 uint16_t u16InterceptRdCRx;
606 /** Cache of CRX write intercepts. */
607 uint16_t u16InterceptWrCRx;
608 /** Cache of DRX read intercepts. */
609 uint16_t u16InterceptRdDRx;
610 /** Cache of DRX write intercepts. */
611 uint16_t u16InterceptWrDRx;
612
613 /** Cache of the pause-filter threshold. */
614 uint16_t u16PauseFilterThreshold;
615 /** Cache of the pause-filter count. */
616 uint16_t u16PauseFilterCount;
617
618 /** Cache of exception intercepts. */
619 uint32_t u32InterceptXcpt;
620 /** Cache of control intercepts. */
621 uint64_t u64InterceptCtrl;
622
623 /** Cache of the TSC offset. */
624 uint64_t u64TSCOffset;
625
626 /** Cache of V_INTR_MASKING bit. */
627 bool fVIntrMasking;
628 /** Cache of the nested-paging bit. */
629 bool fNestedPaging;
630 /** Cache of the LBR virtualization bit. */
631 bool fLbrVirt;
632 /** Whether the VMCB is cached by HM. */
633 bool fCacheValid;
634 /** Alignment. */
635 bool afPadding0[4];
636} SVMNESTEDVMCBCACHE;
637/** Pointer to the SVMNESTEDVMCBCACHE structure. */
638typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
639/** Pointer to a const SVMNESTEDVMCBCACHE structure. */
640typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
641AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
642
643/** @} */
644
645
646/** @addtogroup grp_hm_int_vmx VMX Internal
647 * @{ */
648
649/** @name Host-state restoration flags.
650 * @note If you change these values don't forget to update the assembly
651 * defines as well!
652 * @{
653 */
654#define VMX_RESTORE_HOST_SEL_DS RT_BIT(0)
655#define VMX_RESTORE_HOST_SEL_ES RT_BIT(1)
656#define VMX_RESTORE_HOST_SEL_FS RT_BIT(2)
657#define VMX_RESTORE_HOST_SEL_GS RT_BIT(3)
658#define VMX_RESTORE_HOST_SEL_TR RT_BIT(4)
659#define VMX_RESTORE_HOST_GDTR RT_BIT(5)
660#define VMX_RESTORE_HOST_IDTR RT_BIT(6)
661#define VMX_RESTORE_HOST_GDT_READ_ONLY RT_BIT(7)
662#define VMX_RESTORE_HOST_GDT_NEED_WRITABLE RT_BIT(8)
663#define VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE RT_BIT(9)
664/**
665 * This _must_ be the top most bit, so that we can easily check that it and
666 * something else is set w/o having to do two checks like this:
667 * @code
668 * if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
669 * && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
670 * @endcode
671 * Instead we can then do:
672 * @code
673 * if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
674 * @endcode
675 */
676#define VMX_RESTORE_HOST_REQUIRED RT_BIT(10)
677/** @} */
678
679/**
680 * Host-state restoration structure.
681 *
682 * This holds host-state fields that require manual restoration.
683 * Assembly version found in HMInternal.mac (should be automatically verified).
684 */
685typedef struct VMXRESTOREHOST
686{
687 RTSEL uHostSelDS; /**< 0x00 */
688 RTSEL uHostSelES; /**< 0x02 */
689 RTSEL uHostSelFS; /**< 0x04 */
690 X86XDTR64 HostGdtr; /**< 0x06 - should be aligned by its 64-bit member. */
691 RTSEL uHostSelGS; /**< 0x10 */
692 RTSEL uHostSelTR; /**< 0x12 */
693 RTSEL uHostSelSS; /**< 0x14 - not restored, just for fetching */
694 X86XDTR64 HostGdtrRw; /**< 0x16 - should be aligned by its 64-bit member. */
695 RTSEL uHostSelCS; /**< 0x20 - not restored, just for fetching */
696 uint8_t abPadding1[4]; /**< 0x22 */
697 X86XDTR64 HostIdtr; /**< 0x26 - should be aligned by its 64-bit member. */
698 uint64_t uHostFSBase; /**< 0x30 */
699 uint64_t uHostGSBase; /**< 0x38 */
700} VMXRESTOREHOST;
701/** Pointer to VMXRESTOREHOST. */
702typedef VMXRESTOREHOST *PVMXRESTOREHOST;
703AssertCompileSize(X86XDTR64, 10);
704AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr.uAddr, 0x08);
705AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtrRw.uAddr, 0x18);
706AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr.uAddr, 0x28);
707AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 0x30);
708AssertCompileSize(VMXRESTOREHOST, 64);
709AssertCompileSizeAlignment(VMXRESTOREHOST, 8);
710
711/**
712 * VMX StartVM function.
713 *
714 * @returns VBox status code (no informational stuff).
715 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
716 * @param pVCpu Pointer to the cross context per-CPU structure.
717 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
718 */
719typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume));
720/** Pointer to a VMX StartVM function. */
721typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
722/** @} */
723
724
725/**
726 * HM VMCPU Instance data.
727 *
728 * Note! If you change members of this struct, make sure to check if the
729 * assembly counterpart in HMInternal.mac needs to be updated as well.
730 *
731 * Note! The members here are ordered and aligned based on estimated frequency of
732 * usage and grouped to fit within a cache line in hot code paths. Even subtle
733 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
734 * care.
735 */
736typedef struct HMCPU
737{
738 /** Set when the TLB has been checked until we return from the world switch. */
739 bool volatile fCheckedTLBFlush;
740 /** Set when we're using VT-x or AMD-V at that moment.
741 * @todo r=bird: Misleading description. For AMD-V this will be set the first
742 * time HMCanExecuteGuest() is called and only cleared again by
743 * HMR3ResetCpu(). For VT-x it will be set by HMCanExecuteGuest when we
744 * can execute something in VT-x mode, and cleared if we cannot.
745 *
746 * The field is much more about recording the last HMCanExecuteGuest
747 * return value than anything about any "moment". */
748 bool fActive;
749
750 /** Whether we should use the debug loop because of single stepping or special
751 * debug breakpoints / events are armed. */
752 bool fUseDebugLoop;
753
754 /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
755 bool fGIMTrapXcptUD;
756 /** Whether \#GP needs to be intercepted for mesa driver workaround. */
757 bool fTrapXcptGpForLovelyMesaDrv;
758 /** Whether we're executing a single instruction. */
759 bool fSingleInstruction;
760 /** Whether \#DE needs to be intercepted (may be required by GCM). */
761 bool fGCMTrapXcptDE;
762
763 bool afAlignment0[1];
764
765 /** An additional error code used for some gurus. */
766 uint32_t u32HMError;
767 /** The last exit-to-ring-3 reason. */
768 int32_t rcLastExitToR3;
769 /** CPU-context changed flags (see HM_CHANGED_xxx). */
770 uint64_t fCtxChanged;
771
772 /** VT-x data. */
773 struct HMCPUVMX
774 {
775 /** @name Guest information.
776 * @{ */
777 /** Guest VMCS information shared with ring-3. */
778 VMXVMCSINFOSHARED VmcsInfo;
779 /** Nested-guest VMCS information shared with ring-3. */
780 VMXVMCSINFOSHARED VmcsInfoNstGst;
781 /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
782 * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs */
783 bool fSwitchedToNstGstVmcsCopyForRing3;
784 /** Whether the static guest VMCS controls has been merged with the
785 * nested-guest VMCS controls. */
786 bool fMergedNstGstCtls;
787 /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
788 bool fCopiedNstGstToShadowVmcs;
789 /** Whether flushing the TLB is required due to switching to/from the
790 * nested-guest. */
791 bool fSwitchedNstGstFlushTlb;
792 /** Alignment. */
793 bool afAlignment0[4];
794 /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
795 uint64_t u64GstMsrApicBase;
796 /** @} */
797
798 /** @name Error reporting and diagnostics.
799 * @{ */
800 /** VT-x error-reporting (mainly for ring-3 propagation). */
801 struct
802 {
803 RTCPUID idCurrentCpu;
804 RTCPUID idEnteredCpu;
805 RTHCPHYS HCPhysCurrentVmcs;
806 uint32_t u32VmcsRev;
807 uint32_t u32InstrError;
808 uint32_t u32ExitReason;
809 uint32_t u32GuestIntrState;
810 } LastError;
811 /** @} */
812 } vmx;
813
814 /** SVM data. */
815 struct HMCPUSVM
816 {
817 /** Whether to emulate long mode support for sysenter/sysexit as Intel CPUs
818 * do. This means intercepting \#UD to emulate the instructions in
819 * long-mode and to intercept reads and writes to the SYSENTER MSRs in order to
820 * preserve the upper 32 bits written to them (AMD will ignore and discard). */
821 bool fEmulateLongModeSysEnterExit;
822 uint8_t au8Alignment0[7];
823
824 /** Cache of the nested-guest's VMCB fields that we modify in order to run the
825 * nested-guest using AMD-V. This will be restored on \#VMEXIT. */
826 SVMNESTEDVMCBCACHE NstGstVmcbCache;
827 } svm;
828
829 /** Event injection state. */
830 HMEVENT Event;
831
832 /** Current shadow paging mode for updating CR4.
833 * @todo move later (@bugref{9217}). */
834 PGMMODE enmShadowMode;
835 uint32_t u32TemporaryPadding;
836
837 /** The PAE PDPEs used with Nested Paging (only valid when
838 * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
839 X86PDPE aPdpes[4];
840
841 /* These two come here because they are accessed from assembly and we don't
842 want to detail all the stats in the assembly version of this structure. */
843 STAMCOUNTER StatVmxWriteHostRip;
844 STAMCOUNTER StatVmxWriteHostRsp;
845 STAMCOUNTER StatVmxVmLaunch;
846 STAMCOUNTER StatVmxVmResume;
847
848 STAMPROFILEADV StatEntry;
849 STAMPROFILEADV StatPreExit;
850 STAMPROFILEADV StatExitHandling;
851 STAMPROFILEADV StatExitIO;
852 STAMPROFILEADV StatExitMovCRx;
853 STAMPROFILEADV StatExitXcptNmi;
854 STAMPROFILEADV StatExitVmentry;
855 STAMPROFILEADV StatImportGuestState;
856 STAMPROFILEADV StatExportGuestState;
857 STAMPROFILEADV StatLoadGuestFpuState;
858 STAMPROFILEADV StatInGC;
859 STAMPROFILEADV StatPoke;
860 STAMPROFILEADV StatSpinPoke;
861 STAMPROFILEADV StatSpinPokeFailed;
862
863 STAMCOUNTER StatInjectInterrupt;
864 STAMCOUNTER StatInjectXcpt;
865 STAMCOUNTER StatInjectReflect;
866 STAMCOUNTER StatInjectConvertDF;
867 STAMCOUNTER StatInjectInterpret;
868 STAMCOUNTER StatInjectReflectNPF;
869
870 STAMCOUNTER StatImportGuestStateFallback;
871 STAMCOUNTER StatReadToTransientFallback;
872
873 STAMCOUNTER StatExitAll;
874 STAMCOUNTER StatDebugExitAll;
875 STAMCOUNTER StatNestedExitAll;
876 STAMCOUNTER StatExitShadowNM;
877 STAMCOUNTER StatExitGuestNM;
878 STAMCOUNTER StatExitShadowPF; /**< Misleading, currently used for MMIO \#PFs as well. */
879 STAMCOUNTER StatExitShadowPFEM;
880 STAMCOUNTER StatExitGuestPF;
881 STAMCOUNTER StatExitGuestUD;
882 STAMCOUNTER StatExitGuestSS;
883 STAMCOUNTER StatExitGuestNP;
884 STAMCOUNTER StatExitGuestTS;
885 STAMCOUNTER StatExitGuestOF;
886 STAMCOUNTER StatExitGuestGP;
887 STAMCOUNTER StatExitGuestDE;
888 STAMCOUNTER StatExitGuestDF;
889 STAMCOUNTER StatExitGuestBR;
890 STAMCOUNTER StatExitGuestAC;
891 STAMCOUNTER StatExitGuestACSplitLock;
892 STAMCOUNTER StatExitGuestDB;
893 STAMCOUNTER StatExitGuestMF;
894 STAMCOUNTER StatExitGuestBP;
895 STAMCOUNTER StatExitGuestXF;
896 STAMCOUNTER StatExitGuestXcpUnk;
897 STAMCOUNTER StatExitDRxWrite;
898 STAMCOUNTER StatExitDRxRead;
899 STAMCOUNTER StatExitCR0Read;
900 STAMCOUNTER StatExitCR2Read;
901 STAMCOUNTER StatExitCR3Read;
902 STAMCOUNTER StatExitCR4Read;
903 STAMCOUNTER StatExitCR8Read;
904 STAMCOUNTER StatExitCR0Write;
905 STAMCOUNTER StatExitCR2Write;
906 STAMCOUNTER StatExitCR3Write;
907 STAMCOUNTER StatExitCR4Write;
908 STAMCOUNTER StatExitCR8Write;
909 STAMCOUNTER StatExitRdmsr;
910 STAMCOUNTER StatExitWrmsr;
911 STAMCOUNTER StatExitClts;
912 STAMCOUNTER StatExitXdtrAccess;
913 STAMCOUNTER StatExitLmsw;
914 STAMCOUNTER StatExitIOWrite;
915 STAMCOUNTER StatExitIORead;
916 STAMCOUNTER StatExitIOStringWrite;
917 STAMCOUNTER StatExitIOStringRead;
918 STAMCOUNTER StatExitIntWindow;
919 STAMCOUNTER StatExitExtInt;
920 STAMCOUNTER StatExitHostNmiInGC;
921 STAMCOUNTER StatExitHostNmiInGCIpi;
922 STAMCOUNTER StatExitPreemptTimer;
923 STAMCOUNTER StatExitTprBelowThreshold;
924 STAMCOUNTER StatExitTaskSwitch;
925 STAMCOUNTER StatExitApicAccess;
926 STAMCOUNTER StatExitReasonNpf;
927
928 STAMCOUNTER StatNestedExitReasonNpf;
929
930 STAMCOUNTER StatFlushPage;
931 STAMCOUNTER StatFlushPageManual;
932 STAMCOUNTER StatFlushPhysPageManual;
933 STAMCOUNTER StatFlushTlb;
934 STAMCOUNTER StatFlushTlbNstGst;
935 STAMCOUNTER StatFlushTlbManual;
936 STAMCOUNTER StatFlushTlbWorldSwitch;
937 STAMCOUNTER StatNoFlushTlbWorldSwitch;
938 STAMCOUNTER StatFlushEntire;
939 STAMCOUNTER StatFlushAsid;
940 STAMCOUNTER StatFlushNestedPaging;
941 STAMCOUNTER StatFlushTlbInvlpgVirt;
942 STAMCOUNTER StatFlushTlbInvlpgPhys;
943 STAMCOUNTER StatTlbShootdown;
944 STAMCOUNTER StatTlbShootdownFlush;
945
946 STAMCOUNTER StatSwitchPendingHostIrq;
947 STAMCOUNTER StatSwitchTprMaskedIrq;
948 STAMCOUNTER StatSwitchGuestIrq;
949 STAMCOUNTER StatSwitchHmToR3FF;
950 STAMCOUNTER StatSwitchVmReq;
951 STAMCOUNTER StatSwitchPgmPoolFlush;
952 STAMCOUNTER StatSwitchDma;
953 STAMCOUNTER StatSwitchExitToR3;
954 STAMCOUNTER StatSwitchLongJmpToR3;
955 STAMCOUNTER StatSwitchMaxResumeLoops;
956 STAMCOUNTER StatSwitchHltToR3;
957 STAMCOUNTER StatSwitchApicAccessToR3;
958 STAMCOUNTER StatSwitchPreempt;
959 STAMCOUNTER StatSwitchNstGstVmexit;
960
961 STAMCOUNTER StatTscParavirt;
962 STAMCOUNTER StatTscOffset;
963 STAMCOUNTER StatTscIntercept;
964
965 STAMCOUNTER StatDRxArmed;
966 STAMCOUNTER StatDRxContextSwitch;
967 STAMCOUNTER StatDRxIoCheck;
968
969 STAMCOUNTER StatExportMinimal;
970 STAMCOUNTER StatExportFull;
971 STAMCOUNTER StatLoadGuestFpu;
972 STAMCOUNTER StatExportHostState;
973
974 STAMCOUNTER StatVmxCheckBadRmSelBase;
975 STAMCOUNTER StatVmxCheckBadRmSelLimit;
976 STAMCOUNTER StatVmxCheckBadRmSelAttr;
977 STAMCOUNTER StatVmxCheckBadV86SelBase;
978 STAMCOUNTER StatVmxCheckBadV86SelLimit;
979 STAMCOUNTER StatVmxCheckBadV86SelAttr;
980 STAMCOUNTER StatVmxCheckRmOk;
981 STAMCOUNTER StatVmxCheckBadSel;
982 STAMCOUNTER StatVmxCheckBadRpl;
983 STAMCOUNTER StatVmxCheckPmOk;
984
985 STAMCOUNTER StatVmxPreemptionRecalcingDeadline;
986 STAMCOUNTER StatVmxPreemptionRecalcingDeadlineExpired;
987 STAMCOUNTER StatVmxPreemptionReusingDeadline;
988 STAMCOUNTER StatVmxPreemptionReusingDeadlineExpired;
989
990#ifdef VBOX_WITH_STATISTICS
991 STAMCOUNTER aStatExitReason[MAX_EXITREASON_STAT];
992 STAMCOUNTER aStatNestedExitReason[MAX_EXITREASON_STAT];
993 STAMCOUNTER aStatInjectedIrqs[256];
994 STAMCOUNTER aStatInjectedXcpts[X86_XCPT_LAST + 1];
995#endif
996#ifdef HM_PROFILE_EXIT_DISPATCH
997 STAMPROFILEADV StatExitDispatch;
998#endif
999} HMCPU;
1000/** Pointer to HM VMCPU instance data. */
1001typedef HMCPU *PHMCPU;
1002AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
1003AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
1004AssertCompileMemberAlignment(HMCPU, vmx, 8);
1005AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfo, 8);
1006AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfoNstGst, 8);
1007AssertCompileMemberAlignment(HMCPU, svm, 8);
1008AssertCompileMemberAlignment(HMCPU, Event, 8);
1009
1010
1011/**
1012 * HM per-VCpu ring-0 only instance data.
1013 */
1014typedef struct HMR0PERVCPU
1015{
1016 /** World switch exit counter. */
1017 uint32_t volatile cWorldSwitchExits;
1018 /** TLB flush count. */
1019 uint32_t cTlbFlushes;
1020 /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
1021 RTCPUID idLastCpu;
1022 /** The CPU ID of the CPU currently owning the VMCS. Set in
1023 * HMR0Enter and cleared in HMR0Leave. */
1024 RTCPUID idEnteredCpu;
1025 /** Current ASID in use by the VM. */
1026 uint32_t uCurrentAsid;
1027
1028 /** Set if we need to flush the TLB during the world switch. */
1029 bool fForceTLBFlush;
1030 /** Whether we've completed the inner HM leave function. */
1031 bool fLeaveDone;
1032 /** Whether we're using the hyper DR7 or guest DR7. */
1033 bool fUsingHyperDR7;
1034 /** Whether we are currently executing in the debug loop.
1035 * Mainly for assertions. */
1036 bool fUsingDebugLoop;
1038 /** Set if we are using the debug loop and wish to intercept RDTSC. */
1038 bool fDebugWantRdTscExit;
1039 /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
1040 * execution. */
1041 bool fLoadSaveGuestXcr0;
1042 /** Set if we need to clear the trap flag because of single stepping. */
1043 bool fClearTrapFlag;
1044
1045 bool afPadding1[1];
1046 /** World switcher flags (HM_WSF_XXX - was CPUMCTX::fWorldSwitcher in 6.1). */
1047 uint32_t fWorldSwitcher;
1048 /** The raw host TSC value from the last VM exit (set by HMR0A.asm). */
1049 uint64_t uTscExit;
1050
1051 /** VT-x data. */
1052 struct HMR0CPUVMX
1053 {
1054 /** Ring-0 pointer to the hardware-assisted VMX execution function. */
1055 PFNHMVMXSTARTVM pfnStartVm;
1056 /** Absolute TSC deadline. */
1057 uint64_t uTscDeadline;
1058 /** The deadline version number. */
1059 uint64_t uTscDeadlineVersion;
1060
1061 /** @name Guest information.
1062 * @{ */
1063 /** Guest VMCS information. */
1064 VMXVMCSINFO VmcsInfo;
1065 /** Nested-guest VMCS information. */
1066 VMXVMCSINFO VmcsInfoNstGst;
1067 /** Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
1068 * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3 */
1069 bool fSwitchedToNstGstVmcs;
1070 bool afAlignment0[7];
1071 /** Pointer to the VMX transient info during VM-exit. */
1072 PVMXTRANSIENT pVmxTransient;
1073 /** @} */
1074
1075 /** @name Host information.
1076 * @{ */
1077 /** Host LSTAR MSR to restore lazily while leaving VT-x. */
1078 uint64_t u64HostMsrLStar;
1079 /** Host STAR MSR to restore lazily while leaving VT-x. */
1080 uint64_t u64HostMsrStar;
1081 /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
1082 uint64_t u64HostMsrSfMask;
1083 /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
1084 uint64_t u64HostMsrKernelGsBase;
1085 /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
1086 uint32_t fLazyMsrs;
1087 /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
1088 bool fUpdatedHostAutoMsrs;
1089 /** Alignment. */
1090 uint8_t au8Alignment0[3];
1091 /** Which host-state bits to restore before being preempted, see
1092 * VMX_RESTORE_HOST_XXX. */
1093 uint32_t fRestoreHostFlags;
1094 /** Alignment. */
1095 uint32_t u32Alignment0;
1096 /** The host-state restoration structure. */
1097 VMXRESTOREHOST RestoreHost;
1098 /** @} */
1099 } vmx;
1100
1101 /** SVM data. */
1102 struct HMR0CPUSVM
1103 {
1104 /** Ring-0 handler for hardware-assisted SVM execution (VMRUN). */
1105 PFNHMSVMVMRUN pfnVMRun;
1106
1107 /** Physical address of the host VMCB which holds additional host-state. */
1108 RTHCPHYS HCPhysVmcbHost;
1109 /** R0 memory object for the host VMCB which holds additional host-state. */
1110 RTR0MEMOBJ hMemObjVmcbHost;
1111
1112 /** Physical address of the guest VMCB. */
1113 RTHCPHYS HCPhysVmcb;
1114 /** R0 memory object for the guest VMCB. */
1115 RTR0MEMOBJ hMemObjVmcb;
1116 /** Pointer to the guest VMCB. */
1117 R0PTRTYPE(PSVMVMCB) pVmcb;
1118
1119 /** Physical address of the MSR bitmap (8 KB). */
1120 RTHCPHYS HCPhysMsrBitmap;
1121 /** R0 memory object for the MSR bitmap (8 KB). */
1122 RTR0MEMOBJ hMemObjMsrBitmap;
1123 /** Pointer to the MSR bitmap. */
1124 R0PTRTYPE(void *) pvMsrBitmap;
1125
1126 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
1127 * we should check if the VTPR changed on every VM-exit. */
1128 bool fSyncVTpr;
1129 bool afAlignment[7];
1130
1131 /** Pointer to the SVM transient info during VM-exit. */
1132 PSVMTRANSIENT pSvmTransient;
1133 /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
1134 uint64_t u64HostTscAux;
1135
1136 /** For saving stack space, the disassembler state is allocated here
1137 * instead of on the stack. */
1138 DISCPUSTATE DisState;
1139 } svm;
1140} HMR0PERVCPU;
1141/** Pointer to HM ring-0 VMCPU instance data. */
1142typedef HMR0PERVCPU *PHMR0PERVCPU;
1143AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
1144AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4);
1145AssertCompileMemberAlignment(HMR0PERVCPU, vmx.RestoreHost, 8);
1146
1147
1148/** @name HM_WSF_XXX - @bugref{9453}, @bugref{9087}
1149 * @note If you change these values don't forget to update the assembly
1150 * defines as well!
1151 * @{ */
1152/** Touch IA32_PRED_CMD.IBPB on VM exit. */
1153#define HM_WSF_IBPB_EXIT RT_BIT_32(0)
1154/** Touch IA32_PRED_CMD.IBPB on VM entry. */
1155#define HM_WSF_IBPB_ENTRY RT_BIT_32(1)
1156/** Touch IA32_FLUSH_CMD.L1D on VM entry. */
1157#define HM_WSF_L1D_ENTRY RT_BIT_32(2)
1158/** Flush MDS buffers on VM entry. */
1159#define HM_WSF_MDS_ENTRY RT_BIT_32(3)
1160
1161/** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
1162#define HM_WSF_L1D_SCHED RT_BIT_32(16)
1163/** Flush MDS buffers on VM scheduling. */
1164#define HM_WSF_MDS_SCHED RT_BIT_32(17)
1165/** @} */
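/*
 * Illustrative only: ring-0 VM init derives this mask from the HM configuration
 * booleans (fIbpbOnVmEntry, fMdsClearOnVmEntry, etc.), roughly along these lines
 * (a sketch, not the actual code):
 *
 *     uint32_t fWsf = 0;
 *     if (pVM->hm.s.fIbpbOnVmEntry)     fWsf |= HM_WSF_IBPB_ENTRY;
 *     if (pVM->hm.s.fIbpbOnVmExit)      fWsf |= HM_WSF_IBPB_EXIT;
 *     if (pVM->hm.s.fMdsClearOnVmEntry) fWsf |= HM_WSF_MDS_ENTRY;
 */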
1166
1167
1168#ifdef IN_RING0
1169extern bool g_fHmVmxSupported;
1170extern uint32_t g_fHmHostKernelFeatures;
1171extern uint32_t g_uHmMaxAsid;
1172extern bool g_fHmVmxUsePreemptTimer;
1173extern uint8_t g_cHmVmxPreemptTimerShift;
1174extern bool g_fHmVmxSupportsVmcsEfer;
1175extern uint64_t g_uHmVmxHostCr4;
1176extern uint64_t g_uHmVmxHostMsrEfer;
1177extern uint64_t g_uHmVmxHostSmmMonitorCtl;
1178extern bool g_fHmSvmSupported;
1179extern uint32_t g_uHmSvmRev;
1180extern uint32_t g_fHmSvmFeatures;
1181
1182extern SUPHWVIRTMSRS g_HmMsrs;
1183
1184
1185VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
1186VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPUCC pVCpu);
1187
1188# ifdef VBOX_STRICT
1189# define HM_DUMP_REG_FLAGS_GPRS RT_BIT(0)
1190# define HM_DUMP_REG_FLAGS_FPU RT_BIT(1)
1191# define HM_DUMP_REG_FLAGS_MSRS RT_BIT(2)
1192# define HM_DUMP_REG_FLAGS_ALL (HM_DUMP_REG_FLAGS_GPRS | HM_DUMP_REG_FLAGS_FPU | HM_DUMP_REG_FLAGS_MSRS)
1193
1194VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPUCC pVCpu, uint32_t fFlags);
1195VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
1196# endif
1197
1198DECLASM(void) hmR0MdsClear(void);
1199#endif /* IN_RING0 */
1200
1201
1202/** @addtogroup grp_hm_int_svm SVM Internal
1203 * @{ */
1204VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCC pVM, PVMCPUCC pVCpu);
1205
1206/**
1207 * Prepares for and executes VMRUN (64-bit register context).
1208 *
1209 * @returns VBox status code (no informational stuff).
1210 * @param pVM The cross context VM structure. (Not used.)
1211 * @param pVCpu The cross context virtual CPU structure.
1212 * @param HCPhyspVMCB Physical address of the VMCB.
1213 *
1214 * @remarks With Spectre mitigations and the usual need for speed (/ micro
1215 * optimizations), we have a bunch of variations of this code depending
1216 * on a few preconditions. In release builds, the code is entirely
1217 * without conditionals. Debug builds have a couple of assertions that
1218 * shouldn't ever be triggered.
1219 *
1220 * @{
1221 */
1222DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1223DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1224DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1225DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1226DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1227DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1228DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1229DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
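/*
 * Illustrative only: ring-0 init stores exactly one of the variants above in
 * HMR0PERVCPU::svm.pfnVMRun. A sketch of the selection (made-up condition and
 * member path, not the actual code):
 *
 *     pVCpu->hmr0.s.svm.pfnVMRun = (fWorldSwitcher & HM_WSF_IBPB_ENTRY)
 *                                ? hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit
 *                                : hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit;
 */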
1230/** @} */
1231
1232/** @} */
1233
1234
1235/** @addtogroup grp_hm_int_vmx VMX Internal
1236 * @{ */
1237VMM_INT_DECL(PVMXVMCSINFOSHARED) hmGetVmxActiveVmcsInfoShared(PVMCPUCC pVCpu);
1238
1239/**
1240 * Used on platforms with poor inline assembly support to retrieve all the
1241 * info from the CPU and put it in the @a pRestoreHost structure.
1242 */
1243DECLASM(void) hmR0VmxExportHostSegmentRegsAsmHlp(PVMXRESTOREHOST pRestoreHost, bool fHaveFsGsBase);
1244
1245/**
1246 * Restores some host-state fields that need not be done on every VM-exit.
1247 *
1248 * @returns VBox status code.
1249 * @param fRestoreHostFlags Flags of which host registers need to be
1250 * restored.
1251 * @param pRestoreHost Pointer to the host-restore structure.
1252 */
1253DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
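/*
 * Illustrative only: a minimal sketch of how the flags and structure above fit
 * together (local names are made up; the real call site is in the ring-0 VT-x
 * code). Per the VMX_RESTORE_HOST_REQUIRED note, the '>' test checks that the
 * required bit plus at least one other restore bit is set:
 *
 *     uint32_t       fFlags = ...;   // accumulated VMX_RESTORE_HOST_XXX bits
 *     VMXRESTOREHOST RestoreHost;    // filled in while exporting host state
 *     if (fFlags > VMX_RESTORE_HOST_REQUIRED)
 *         VMXRestoreHostState(fFlags, &RestoreHost);
 */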
1254
1255/**
1256 * VMX StartVM functions.
1257 *
1258 * @returns VBox status code (no informational stuff).
1259 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
1260 * @param pVCpu Pointer to the cross context per-CPU structure of the
1261 * calling EMT.
1262 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
1263 *
1264 * @remarks With Spectre mitigations and the usual need for speed (/ micro
1265 * optimizations), we have a bunch of variations of this code depending
1266 * on a few preconditions. In release builds, the code is entirely
1267 * without conditionals. Debug builds have a couple of assertions that
1268 * shouldn't ever be triggered.
1269 *
1270 * @{
1271 */
1272DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1273DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1274DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1275DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1276DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1277DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1278DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1279DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1280DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1281DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1282DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1283DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1284DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1285DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1286DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1287DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1288DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1289DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1290DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1291DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1292DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1293DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1294DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1295DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1296DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1297DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1298DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1299DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1300DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1301DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1302DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1303DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1304/** @} */
1305
1306/** @} */
1307
1308/** @} */
1309
1310RT_C_DECLS_END
1311
1312#endif /* !VMM_INCLUDED_SRC_include_HMInternal_h */
1313