VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.h@ 104821

Last change on this file since 104821 was 101603, checked in by vboxsync, 13 months ago

VMM: Nested VMX: Todo regarding reminder to add 'enmHwVirt' when next bumping CPUM SSM version.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.5 KB
/* $Id: CPUMInternal.h 101603 2023-10-26 11:15:56Z vboxsync $ */
/** @file
 * CPUM - Internal header file.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_CPUMInternal_h
#define VMM_INCLUDED_SRC_include_CPUMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#ifndef VBOX_FOR_DTRACE_LIB
# include <VBox/cdefs.h>
# include <VBox/types.h>
# include <VBox/vmm/stam.h>
# include <iprt/x86.h>
# include <VBox/vmm/pgm.h>
#else
# pragma D depends_on library x86.d
# pragma D depends_on library cpumctx.d
# pragma D depends_on library cpum.d

/* Some fudging. */
typedef uint64_t STAMCOUNTER;
#endif



/** @defgroup grp_cpum_int Internals
 * @ingroup grp_cpum
 * @internal
 * @{
 */
/** Use flags (CPUM::fUseFlags).
 * (Don't forget to sync this with CPUMInternal.mac !)
 * @note Was part of saved state (6.1 and earlier).
 * @{ */
/** Indicates that we've saved the host FPU, SSE, whatever state and that it
 * needs to be restored. */
#define CPUM_USED_FPU_HOST              RT_BIT(0)
/** Indicates that we've loaded the guest FPU, SSE, whatever state and that it
 * needs to be saved.
 * @note Mirrored in CPUMCTX::fUsedFpuGuest for the HM switcher code. */
#define CPUM_USED_FPU_GUEST             RT_BIT(10)
/** Used the guest FPU, SSE or similar state since we were last in REM.
 * REM syncing clears this, lazy FPU loading sets it. */
#define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
/** The XMM state was manually restored. (AMD only) */
#define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)

/** Host OS is using SYSENTER and we must NULL the CS. */
#define CPUM_USE_SYSENTER               RT_BIT(3)
/** Host OS is using SYSCALL. */
#define CPUM_USE_SYSCALL                RT_BIT(4)

/** Debug registers are used by the host and DR7 and DR6 must be saved and
 * disabled when switching to raw-mode. */
#define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
/** Records that we've saved the host DRx registers.
 * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
 * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
#define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
 * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
#define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
/** Used in ring-0 to indicate that we have loaded the hypervisor debug
 * registers. */
#define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
/** Used in ring-0 to indicate that we have loaded the guest debug
 * registers (DR0-3 and maybe DR6) for direct use by the guest.
 * DR7 (and AMD-V DR6) are handled via the VMCB. */
#define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)

/** Host CPU requires fxsave/fxrstor leaky bit handling. */
#define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
/** Set if the VM supports long-mode. */
#define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
/** @} */
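/*
 * Illustrative sketch, not VMM code: how these bits are typically tested and
 * updated on a plain uint32_t holding CPUMCPU::fUseFlags.  The local variable
 * and the surrounding logic are hypothetical; only the CPUM_USED_* defines
 * above are real.
 *
 *      uint32_t fUseFlags = 0;
 *
 *      // Lazily loading the guest FPU/SSE state: remember that it now needs saving.
 *      fUseFlags |= CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM;
 *
 *      // On the way back to the host, save the guest state only if it was loaded.
 *      if (fUseFlags & CPUM_USED_FPU_GUEST)
 *      {
 *          // ... save the guest FPU/SSE state here ...
 *          fUseFlags &= ~CPUM_USED_FPU_GUEST;
 *      }
 */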


/** @name CPUM Saved State Version.
 * @{ */
/** The current saved state version.
 * @todo When bumping to the next version, add CPUMCTX::enmHwVirt to the saved
 *       state. */
#define CPUM_SAVED_STATE_VERSION                    CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4
/** The saved state version with u32RestoreProcCtls2 for Nested Microsoft
 * Hyper-V. */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4       23
/** The saved state version with more virtual VMCS fields (HLAT prefix size,
 * PCONFIG-exiting bitmap, HLAT ptr, VM-exit ctls2) and a CPUMCTX field (VM-exit
 * ctls2 MSR). */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3       22
/** The saved state version with PAE PDPEs added. */
#define CPUM_SAVED_STATE_VERSION_PAE_PDPES          21
/** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2       20
/** The saved state version including VMX hardware virtualization state. */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX         19
/** The saved state version including SVM hardware virtualization state. */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM         18
/** The saved state version including XSAVE state. */
#define CPUM_SAVED_STATE_VERSION_XSAVE              17
/** The saved state version with good CPUID leaf count. */
#define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT   16
/** CPUID changes with the explode code forgetting to update the leaf count on
 * restore, resulting in garbage being saved when restoring and re-saving old
 * states. */
#define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT    15
/** The saved state version before the CPUID changes. */
#define CPUM_SAVED_STATE_VERSION_PUT_STRUCT         14
/** The saved state version before using SSMR3PutStruct. */
#define CPUM_SAVED_STATE_VERSION_MEM                13
/** The saved state version before introducing the MSR size field. */
#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE        12
/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
#define CPUM_SAVED_STATE_VERSION_VER3_2             11
/** The saved state version of 3.0 and 3.1 trunk before the teleportation
 * changes. */
#define CPUM_SAVED_STATE_VERSION_VER3_0             10
/** The saved state version for the 2.1 trunk before the MSR changes. */
#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR       9
/** The saved state version of 2.0, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER2_0             8
/** The saved state version of 1.6, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER1_6             6
/** @} */
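/*
 * Illustrative sketch, not the actual load path: how a saved-state load callback
 * typically gates its work on the version constants above.  uVersion, the
 * status code and the branch bodies are hypothetical; only the
 * CPUM_SAVED_STATE_VERSION_* defines come from this header.
 *
 *      if (uVersion > CPUM_SAVED_STATE_VERSION)
 *          return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;  // newer than we know
 *      if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX)
 *      {
 *          // ... load the VMX hardware virtualization state added in version 19 ...
 *      }
 *      else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER1_6)
 *      {
 *          // ... apply the 1.6-era compatibility fixups ...
 *      }
 */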


/** @name XSAVE limits.
 * @{ */
/** Max size we accept for the XSAVE area.
 * @see CPUMCTX::abXState */
#define CPUM_MAX_XSAVE_AREA_SIZE    (0x4000 - 0x300)
/** Min size we accept for the XSAVE area. */
#define CPUM_MIN_XSAVE_AREA_SIZE    0x240
/** @} */
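/*
 * Illustrative sketch: the limits above are the sanity bounds for an XSAVE area
 * size, e.g. one reported by CPUID leaf 0xD.  The variable and the chosen status
 * code are hypothetical.
 *
 *      uint32_t const cbXSaveArea = uCpuIdEbx;  // e.g. CPUID[0xD].EBX, sub-leaf 0
 *      if (   cbXSaveArea < CPUM_MIN_XSAVE_AREA_SIZE
 *          || cbXSaveArea > CPUM_MAX_XSAVE_AREA_SIZE)
 *          return VERR_NOT_SUPPORTED;  // or a more specific CPUM status code
 */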


/**
 * CPU info.
 */
typedef struct CPUMINFO
{
    /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
    uint32_t                    cMsrRanges;
    /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
     * instruction.  Older hardware has been observed to ignore higher bits. */
    uint32_t                    fMsrMask;

    /** MXCSR mask. */
    uint32_t                    fMxCsrMask;

    /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
    uint32_t                    cCpuIdLeaves;
    /** The index of the first extended CPUID leaf in the array.
     * Set to cCpuIdLeaves if none present. */
    uint32_t                    iFirstExtCpuIdLeaf;
    /** How to handle unknown CPUID leaves. */
    CPUMUNKNOWNCPUID            enmUnknownCpuIdMethod;
    /** For use with CPUMUNKNOWNCPUID_DEFAULTS (DB & VM),
     * CPUMUNKNOWNCPUID_LAST_STD_LEAF (VM) and CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX (VM). */
    CPUMCPUID                   DefCpuId;

    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t                    uScalableBusFreq;

    /** Pointer to the MSR ranges (for compatibility with old hyper heap code). */
    R3PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR3;
    /** Pointer to the CPUID leaves (for compatibility with old hyper heap code). */
    R3PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR3;

    /** CPUID leaves. */
    CPUMCPUIDLEAF               aCpuIdLeaves[256];
    /** MSR ranges.
     * @todo This is insane, so we might want to move it into a separate
     *       allocation.  The insanity is mainly due to more recent AMD CPUs. */
    CPUMMSRRANGE                aMsrRanges[8192];
} CPUMINFO;
/** Pointer to a CPU info structure. */
typedef CPUMINFO *PCPUMINFO;
/** Pointer to a const CPU info structure. */
typedef CPUMINFO const *CPCPUMINFO;
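/*
 * Illustrative sketch: a minimal ring-3 lookup over the CPUID leaves described
 * by CPUMINFO, roughly what the real helpers (cpumCpuIdGetLeafInt and friends,
 * declared near the end of this file) have to do; sub-leaves and leaf ordering
 * are ignored here.  The helper name is hypothetical.
 *
 *      static PCCPUMCPUIDLEAF exampleFindLeaf(CPCPUMINFO pInfo, uint32_t uLeaf)
 *      {
 *          for (uint32_t i = 0; i < pInfo->cCpuIdLeaves; i++)
 *              if (pInfo->paCpuIdLeavesR3[i].uLeaf == uLeaf)
 *                  return &pInfo->paCpuIdLeavesR3[i];
 *          return NULL;  // unknown leaf, see enmUnknownCpuIdMethod
 *      }
 */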


/**
 * The saved host CPU state.
 */
typedef struct CPUMHOSTCTX
{
    /** The extended state (FPU/SSE/AVX/AVX-2/XXXX). Must be aligned on 64 bytes. */
    union /* no tag */
    {
        X86XSAVEAREA    XState;
        /** Byte view for simple indexing and space allocation.
         * @note Must match or exceed the size of CPUMCTX::abXState. */
        uint8_t         abXState[0x4000 - 0x300];
    } CPUM_UNION_NM(u);

    /** General purpose registers, flags and more.
     * @{ */
    /*uint64_t        rax; - scratch*/
    uint64_t            rbx;
    /*uint64_t        rcx; - scratch*/
    /*uint64_t        rdx; - scratch*/
    uint64_t            rdi;
    uint64_t            rsi;
    uint64_t            rbp;
    uint64_t            rsp;
    /*uint64_t        r8;  - scratch*/
    /*uint64_t        r9;  - scratch*/
    uint64_t            r10;
    uint64_t            r11;
    uint64_t            r12;
    uint64_t            r13;
    uint64_t            r14;
    uint64_t            r15;
    /*uint64_t        rip; - scratch*/
    uint64_t            rflags;
    /** @} */

    /** Selector registers.
     * @{ */
    RTSEL               ss;
    RTSEL               ssPadding;
    RTSEL               gs;
    RTSEL               gsPadding;
    RTSEL               fs;
    RTSEL               fsPadding;
    RTSEL               es;
    RTSEL               esPadding;
    RTSEL               ds;
    RTSEL               dsPadding;
    RTSEL               cs;
    RTSEL               csPadding;
    /** @} */

    /** Control registers.
     * @{ */
    /** The CR0 FPU state in HM mode. */
    uint64_t            cr0;
    /*uint64_t        cr2; - scratch*/
    uint64_t            cr3;
    uint64_t            cr4;
    uint64_t            cr8;
    /** @} */

    /** Debug registers.
     * @{ */
    uint64_t            dr0;
    uint64_t            dr1;
    uint64_t            dr2;
    uint64_t            dr3;
    uint64_t            dr6;
    uint64_t            dr7;
    /** @} */

    /** Global Descriptor Table register. */
    X86XDTR64           gdtr;
    uint16_t            gdtrPadding;
    /** Interrupt Descriptor Table register. */
    X86XDTR64           idtr;
    uint16_t            idtrPadding;
    /** The local descriptor table register. */
    RTSEL               ldtr;
    RTSEL               ldtrPadding;
    /** The task register. */
    RTSEL               tr;
    RTSEL               trPadding;

    /** MSRs
     * @{ */
    CPUMSYSENTER        SysEnter;
    uint64_t            FSbase;
    uint64_t            GSbase;
    uint64_t            efer;
    /** @} */

    /** The XCR0 register. */
    uint64_t            xcr0;
    /** The mask to pass to XSAVE/XRSTOR in EDX:EAX.  If zero we use
     * FXSAVE/FXRSTOR instead (since bit 0 will always be set, we only need to
     * test this field against zero; see the illustrative sketch following this
     * structure). */
    uint64_t            fXStateMask;

    /* Padding to get a 64-byte aligned size. */
    uint8_t             auPadding[24];
#if HC_ARCH_BITS != 64
# error HC_ARCH_BITS not defined or unsupported
#endif
} CPUMHOSTCTX;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
#endif
/** Pointer to the saved host CPU state. */
typedef CPUMHOSTCTX *PCPUMHOSTCTX;
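/*
 * Illustrative sketch of the fXStateMask convention referenced above: a non-zero
 * mask selects the XSAVE/XRSTOR path (mask passed in EDX:EAX), while a zero mask
 * means falling back to FXSAVE/FXRSTOR.  The real work is done in assembly; the
 * helper below is hypothetical and only outlines the decision.
 *
 *      static void exampleSaveHostFpuState(PCPUMHOSTCTX pHostCtx)
 *      {
 *          if (pHostCtx->fXStateMask != 0)
 *          {
 *              // xsave  [extended state area], edx:eax = pHostCtx->fXStateMask
 *          }
 *          else
 *          {
 *              // fxsave [legacy x87/SSE portion of the extended state area]
 *          }
 *      }
 */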


/**
 * The hypervisor context CPU state (just DRx left now).
 */
typedef struct CPUMHYPERCTX
{
    /** Debug registers.
     * @remarks DR4 and DR5 should not be used since they are aliases for
     *          DR6 and DR7 respectively on both AMD and Intel CPUs.
     * @remarks DR8-15 are currently not supported by AMD or Intel, so
     *          we do not support them either.
     */
    uint64_t            dr[8];
    /** @todo eliminate the rest. */
    uint64_t            cr3;
    uint64_t            au64Padding[7];
} CPUMHYPERCTX;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSizeAlignment(CPUMHYPERCTX, 64);
#endif
/** Pointer to the hypervisor context CPU state. */
typedef CPUMHYPERCTX *PCPUMHYPERCTX;

/**
 * CPUM Data (part of VM).
 */
typedef struct CPUM
{
    /** Use flags.
     * These flags indicate which CPU features the host uses.
     */
    uint32_t                fHostUseFlags;

    /** CR4 mask
     * @todo obsolete? */
    struct
    {
        uint32_t            AndMask; /**< @todo Move these to the per-CPU structure and fix the switchers. Saves a register! */
        uint32_t            OrMask;
    } CR4;

    /** The (more) portable CPUID level. */
    uint8_t                 u8PortableCpuIdLevel;
    /** Indicates that a state restore is pending.
     * This is used to verify load order dependencies (PGM). */
    bool                    fPendingRestore;
    /** Whether MTRR reads report valid memory types for memory regions. */
    bool                    fMtrrRead;
    /** Whether the guest's writes to MTRRs are implemented. */
    bool                    fMtrrWrite;

    /** Mask of the XSAVE/XRSTOR components we can expose to the guest
     * (see the sketch following this structure). */
    uint64_t                fXStateGuestMask;
    /** XSAVE/XRSTOR host mask.  Only state components in this mask can be exposed
     * to the guest.  This is 0 if no XSAVE/XRSTOR bits can be exposed. */
    uint64_t                fXStateHostMask;

#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    /** The host MXCSR mask (determined at init). */
    uint32_t                fHostMxCsrMask;
#else
    uint32_t                u32UnusedOnNonX86;
#endif
    uint8_t                 abPadding1[4];

    /** Random value we store in the reserved RFLAGS bits we don't use ourselves so
     * we can detect corruption. */
    uint64_t                fReservedRFlagsCookie;

    /** Align to 64-byte boundary. */
    uint8_t                 abPadding2[16];

    /** Host CPU feature information.
     * Externally visible via the VM structure, aligned on a 64-byte boundary. */
    CPUMFEATURES            HostFeatures;
    /** Guest CPU feature information.
     * Externally visible via the VM structure, aligned with HostFeatures. */
    CPUMFEATURES            GuestFeatures;
    /** Guest CPU info. */
    CPUMINFO                GuestInfo;

    /** The standard set of CpuId leaves. */
    CPUMCPUID               aGuestCpuIdPatmStd[6];
    /** The extended set of CpuId leaves. */
    CPUMCPUID               aGuestCpuIdPatmExt[10];
    /** The centaur set of CpuId leaves. */
    CPUMCPUID               aGuestCpuIdPatmCentaur[4];

    /** @name MSR statistics.
     * @{ */
    STAMCOUNTER             cMsrWrites;
    STAMCOUNTER             cMsrWritesToIgnoredBits;
    STAMCOUNTER             cMsrWritesRaiseGp;
    STAMCOUNTER             cMsrWritesUnknown;
    STAMCOUNTER             cMsrReads;
    STAMCOUNTER             cMsrReadsRaiseGp;
    STAMCOUNTER             cMsrReadsUnknown;
    /** @} */
} CPUM;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileMemberOffset(CPUM, HostFeatures, 64);
AssertCompileMemberOffset(CPUM, GuestFeatures, 112);
#endif
/** Pointer to the CPUM instance data residing in the shared VM structure. */
typedef CPUM *PCPUM;
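/*
 * Illustrative sketch of the relationship between the two XSAVE masks in CPUM:
 * whatever is offered to the guest must be a subset of what the host itself can
 * save and restore.  The requested-components value, the XSAVE_C_* constants
 * and the pVM->cpum.s access path are illustrative assumptions here, not taken
 * from this header.
 *
 *      uint64_t const fRequested = XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM;
 *      pVM->cpum.s.fXStateGuestMask = fRequested & pVM->cpum.s.fXStateHostMask;
 */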

/**
 * CPUM Data (part of VMCPU).
 */
typedef struct CPUMCPU
{
    /** Guest context.
     * Aligned on a 64-byte boundary. */
    CPUMCTX                 Guest;
    /** Guest context - misc MSRs.
     * Aligned on a 64-byte boundary. */
    CPUMCTXMSRS             GuestMsrs;

    /** Nested VMX: VMX-preemption timer. */
    TMTIMERHANDLE           hNestedVmxPreemptTimer;

    /** Use flags.
     * These flags indicate both what is to be used and what has been used. */
    uint32_t                fUseFlags;

    /** Changed flags.
     * These flags indicate to REM (and others) which important guest registers
     * have been changed since the last time the flags were cleared.
     * See the CPUM_CHANGED_* defines for what we keep track of.
     *
     * @todo Obsolete, but will probably be refactored so keep it for reference. */
    uint32_t                fChanged;

    /** Temporary storage for the return code of the function called in the
     * 32-64 switcher. */
    uint32_t                u32RetCode;

    /** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC
     * (?) bits are visible or not.  (The APIC is responsible for setting this
     * when loading state, so we won't save it.) */
    bool                    fCpuIdApicFeatureVisible;

    /** Align the next member on a 64-byte boundary. */
    uint8_t                 abPadding2[64 - 8 - 4*3 - 1];

    /** Saved host context.  Only valid while inside RC or HM contexts.
     * Must be aligned on a 64-byte boundary. */
    CPUMHOSTCTX             Host;
    /** Old hypervisor context, only used for combined DRx values now.
     * Must be aligned on a 64-byte boundary. */
    CPUMHYPERCTX            Hyper;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    uint8_t                 aMagic[56];
    uint64_t                uMagic;
#endif
} CPUMCPU;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileMemberAlignment(CPUMCPU, Host, 64);
#endif
/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
typedef CPUMCPU *PCPUMCPU;

#ifndef VBOX_FOR_DTRACE_LIB
RT_C_DECLS_BEGIN

PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf);
PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
PCPUMCPUIDLEAF cpumCpuIdGetLeafInt(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
PCPUMCPUIDLEAF cpumCpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves);
# ifdef VBOX_STRICT
void cpumCpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves);
# endif
int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs,
                                PCPUMFEATURES pFeatures);

# ifdef IN_RING3
int cpumR3DbgInit(PVM pVM);
int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCVMXMSRS pHostVmxMsrs,
                                       PVMXMSRS pGuestVmxMsrs);
void cpumR3CpuIdRing3InitDone(PVM pVM);
void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
int cpumR3MsrReconcileWithCpuId(PVM pVM);
int cpumR3MsrApplyFudge(PVM pVM);
int cpumR3MsrRegStats(PVM pVM);
int cpumR3MsrStrictInitChecks(void);
PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
# endif

# ifdef IN_RC
DECLASM(int) cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
# endif

# ifdef IN_RING0
DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
DECLASM(void) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
#  if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
DECLASM(void) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
#  endif
# endif

# if defined(IN_RC) || defined(IN_RING0)
DECLASM(int) cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
DECLASM(void) cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
DECLASM(void) cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
DECLASM(void) cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM);
# endif

RT_C_DECLS_END
#endif /* !VBOX_FOR_DTRACE_LIB */
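/*
 * Illustrative sketch: calling one of the CPUID lookup helpers declared above.
 * Only the cpumCpuIdGetLeafEx prototype is taken from this header; pVM, the
 * chosen leaf and the use of the result are hypothetical.
 *
 *      bool fExactSubLeafHit = false;
 *      PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, UINT32_C(0x00000007), 0, &fExactSubLeafHit);
 *      if (pLeaf && fExactSubLeafHit)
 *      {
 *          // pLeaf->uEbx: structured extended feature flags for leaf 7, sub-leaf 0.
 *      }
 */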

/** @} */

#endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_h */