VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@105486

Last change on this file since 105486 was 105092, checked in by vboxsync, 5 months ago

VMM/CPUM: Fixed an ancient bug in CPUMRecalcHyperDRx where it would try to switch to hyper-mode without needing to, upsetting VT-x DRx management. The fix is perhaps not 100% perfect, but it makes bs3-cpu-weird-1 work again. bugref:10715

1/* $Id: CPUMAllRegs.cpp 105092 2024-07-02 08:55:52Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#ifdef IN_RING3
54# include <iprt/thread.h>
55#endif
56
57/** Disable stack frame pointer generation here. */
58#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
59# pragma optimize("y", off)
60#endif
61
62AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
63
64
65/*********************************************************************************************************************************
66* Defined Constants And Macros *
67*********************************************************************************************************************************/
68/**
69 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
70 *
71 * @returns Pointer to the Virtual CPU.
72 * @param a_pGuestCtx Pointer to the guest context.
73 */
74#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
75
76/**
77 * Lazily loads the hidden parts of a selector register when using raw-mode.
78 */
79#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
80 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
81
82/** @def CPUM_INT_ASSERT_NOT_EXTRN
83 * Macro for asserting that the state specified by @a a_fNotExtrn is present (not marked external).
84 *
85 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
86 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
87 */
88#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
89 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
90 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
91
92
93VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
94{
95 pVCpu->cpum.s.Hyper.cr3 = cr3;
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104/** @def MAYBE_LOAD_DRx
105 * Macro for updating DRx values in raw-mode and ring-0 contexts.
106 */
107#ifdef IN_RING0
108# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { \
109 if ((a_pVCpu)->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER)) \
110 a_fnLoad(a_uValue); \
111 } while (0)
112#else
113# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
114#endif
115
116static void cpumSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
117{
118 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
119 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
120}
121
122
123static void cpumSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
124{
125 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
126 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
127}
128
129
130static void cpumSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
131{
132 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
133 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
134}
135
136
137static void cpumSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
138{
139 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
140 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
141}
142
143
144VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
145{
146 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
147}
148
149
150VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
151{
152 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
153}
154
155
156VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
157{
158 return pVCpu->cpum.s.Hyper.dr[0];
159}
160
161
162VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
163{
164 return pVCpu->cpum.s.Hyper.dr[1];
165}
166
167
168VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
169{
170 return pVCpu->cpum.s.Hyper.dr[2];
171}
172
173
174VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
175{
176 return pVCpu->cpum.s.Hyper.dr[3];
177}
178
179
180VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
181{
182 return pVCpu->cpum.s.Hyper.dr[6];
183}
184
185
186VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
187{
188 return pVCpu->cpum.s.Hyper.dr[7];
189}
190
191
192/**
193 * Checks that the special cookie stored in the unused reserved RFLAGS bits is still intact.
194 *
195 * @retval true if cookie is ok.
196 * @retval false if cookie is not ok.
197 * @param pVM The cross context VM structure.
198 * @param pVCpu The cross context virtual CPU structure.
199 */
200VMM_INT_DECL(bool) CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu)
201{
202 AssertLogRelMsgReturn( ( pVCpu->cpum.s.Guest.rflags.uBoth
203 & ~(uint64_t)(CPUMX86EFLAGS_HW_MASK_64 | CPUMX86EFLAGS_INT_MASK_64))
204 == pVM->cpum.s.fReservedRFlagsCookie
205 && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK
206 && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RAZ_MASK & CPUMX86EFLAGS_HW_MASK_64) == 0,
207 ("rflags=%#RX64 vs fReservedRFlagsCookie=%#RX64\n",
208 pVCpu->cpum.s.Guest.rflags.uBoth, pVM->cpum.s.fReservedRFlagsCookie),
209 false);
210 return true;
211}
212
213
214/**
215 * Queries the pointer to the internal CPUMCTX structure.
216 *
217 * @returns The CPUMCTX pointer.
218 * @param pVCpu The cross context virtual CPU structure.
219 */
220VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
221{
222 return &pVCpu->cpum.s.Guest;
223}
224
225
226/**
227 * Queries the pointer to the internal CPUMCTXMSRS structure.
228 *
229 * This is for NEM only.
230 *
231 * @returns The CPUMCTX pointer.
232 * @param pVCpu The cross context virtual CPU structure.
233 */
234VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
235{
236 return &pVCpu->cpum.s.GuestMsrs;
237}
238
239
240VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
241{
242 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
243 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
244 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
245 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
246 return VINF_SUCCESS; /* formality, consider it void. */
247}
248
249
250VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
251{
252 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
253 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
254 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
255 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
256 return VINF_SUCCESS; /* formality, consider it void. */
257}
258
259
260VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
261{
262 pVCpu->cpum.s.Guest.tr.Sel = tr;
263 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
264 return VINF_SUCCESS; /* formality, consider it void. */
265}
266
267
268VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
269{
270 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
271 /* The caller will set more hidden bits if it has them. */
272 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
273 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
274 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
275 return VINF_SUCCESS; /* formality, consider it void. */
276}
277
278
279/**
280 * Set the guest CR0.
281 *
282 * When called in GC, the hyper CR0 may be updated if that is
283 * required. The caller only has to take special action if AM,
284 * WP, PG or PE changes.
285 *
286 * @returns VINF_SUCCESS (consider it void).
287 * @param pVCpu The cross context virtual CPU structure.
288 * @param cr0 The new CR0 value.
289 */
290VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
291{
292 /*
293 * Check for changes causing TLB flushes (for REM).
294 * The caller is responsible for calling PGM when appropriate.
295 */
296 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
297 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
298 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
299 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
300
301 /*
302 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
303 */
304 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
305 PGMCr0WpEnabled(pVCpu);
306
307 /* The ET flag is settable on a 386 and hardwired on 486+. */
308 if ( !(cr0 & X86_CR0_ET)
309 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
310 cr0 |= X86_CR0_ET;
311
312 pVCpu->cpum.s.Guest.cr0 = cr0;
313 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
314 return VINF_SUCCESS;
315}
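/*
 * Illustrative sketch (example only, not part of this file's API; the helper
 * name is made up): a caller changing CR0 and reading it back, showing the
 * ET hardwiring described above.
 */
#if 0
static void cpumExampleSetGuestCr0(PVMCPUCC pVCpu, uint64_t uNewCr0)
{
    /* Clear ET on purpose; unless the guest is configured as an 80386,
       CPUMSetGuestCR0 hardwires it back to 1. */
    CPUMSetGuestCR0(pVCpu, uNewCr0 & ~(uint64_t)X86_CR0_ET);
    uint64_t const uCr0 = CPUMGetGuestCR0(pVCpu);
    NOREF(uCr0);
}
#endif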
316
317
318VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
319{
320 pVCpu->cpum.s.Guest.cr2 = cr2;
321 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
322 return VINF_SUCCESS;
323}
324
325
326VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
327{
328 pVCpu->cpum.s.Guest.cr3 = cr3;
329 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
330 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
331 return VINF_SUCCESS;
332}
333
334
335VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
336{
337 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
338
339 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
340 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
341 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
342
343 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
344 pVCpu->cpum.s.Guest.cr4 = cr4;
345 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
346 return VINF_SUCCESS;
347}
348
349
350VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
351{
352 pVCpu->cpum.s.Guest.eflags.u = eflags;
353 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
354 return VINF_SUCCESS;
355}
356
357
358VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
359{
360 pVCpu->cpum.s.Guest.eip = eip;
361 return VINF_SUCCESS;
362}
363
364
365VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
366{
367 pVCpu->cpum.s.Guest.eax = eax;
368 return VINF_SUCCESS;
369}
370
371
372VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
373{
374 pVCpu->cpum.s.Guest.ebx = ebx;
375 return VINF_SUCCESS;
376}
377
378
379VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
380{
381 pVCpu->cpum.s.Guest.ecx = ecx;
382 return VINF_SUCCESS;
383}
384
385
386VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
387{
388 pVCpu->cpum.s.Guest.edx = edx;
389 return VINF_SUCCESS;
390}
391
392
393VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
394{
395 pVCpu->cpum.s.Guest.esp = esp;
396 return VINF_SUCCESS;
397}
398
399
400VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
401{
402 pVCpu->cpum.s.Guest.ebp = ebp;
403 return VINF_SUCCESS;
404}
405
406
407VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
408{
409 pVCpu->cpum.s.Guest.esi = esi;
410 return VINF_SUCCESS;
411}
412
413
414VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
415{
416 pVCpu->cpum.s.Guest.edi = edi;
417 return VINF_SUCCESS;
418}
419
420
421VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
422{
423 pVCpu->cpum.s.Guest.ss.Sel = ss;
424 return VINF_SUCCESS;
425}
426
427
428VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
429{
430 pVCpu->cpum.s.Guest.cs.Sel = cs;
431 return VINF_SUCCESS;
432}
433
434
435VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
436{
437 pVCpu->cpum.s.Guest.ds.Sel = ds;
438 return VINF_SUCCESS;
439}
440
441
442VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
443{
444 pVCpu->cpum.s.Guest.es.Sel = es;
445 return VINF_SUCCESS;
446}
447
448
449VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
450{
451 pVCpu->cpum.s.Guest.fs.Sel = fs;
452 return VINF_SUCCESS;
453}
454
455
456VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
457{
458 pVCpu->cpum.s.Guest.gs.Sel = gs;
459 return VINF_SUCCESS;
460}
461
462
463VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
464{
465 pVCpu->cpum.s.Guest.msrEFER = val;
466 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
467}
468
469
470VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
471{
472 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
473 if (pcbLimit)
474 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
475 return pVCpu->cpum.s.Guest.idtr.pIdt;
476}
477
478
479VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
480{
481 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
482 if (pHidden)
483 *pHidden = pVCpu->cpum.s.Guest.tr;
484 return pVCpu->cpum.s.Guest.tr.Sel;
485}
486
487
488VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
489{
490 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
491 return pVCpu->cpum.s.Guest.cs.Sel;
492}
493
494
495VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
496{
497 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
498 return pVCpu->cpum.s.Guest.ds.Sel;
499}
500
501
502VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
503{
504 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
505 return pVCpu->cpum.s.Guest.es.Sel;
506}
507
508
509VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
510{
511 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
512 return pVCpu->cpum.s.Guest.fs.Sel;
513}
514
515
516VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
517{
518 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
519 return pVCpu->cpum.s.Guest.gs.Sel;
520}
521
522
523VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
524{
525 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
526 return pVCpu->cpum.s.Guest.ss.Sel;
527}
528
529
530VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
531{
532 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
533 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
534 if ( !CPUMIsGuestInLongMode(pVCpu)
535 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
536 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
537 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
538}
539
540
541VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
542{
543 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
544 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
545 if ( !CPUMIsGuestInLongMode(pVCpu)
546 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
547 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
548 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
549}
550
551
552VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
553{
554 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
555 return pVCpu->cpum.s.Guest.ldtr.Sel;
556}
557
558
559VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
560{
561 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
562 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
563 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
564 return pVCpu->cpum.s.Guest.ldtr.Sel;
565}
566
567
568VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
569{
570 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
571 return pVCpu->cpum.s.Guest.cr0;
572}
573
574
575VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
576{
577 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
578 return pVCpu->cpum.s.Guest.cr2;
579}
580
581
582VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
583{
584 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
585 return pVCpu->cpum.s.Guest.cr3;
586}
587
588
589VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
590{
591 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
592 return pVCpu->cpum.s.Guest.cr4;
593}
594
595
596VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
597{
598 uint64_t u64;
599 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
600 if (RT_FAILURE(rc))
601 u64 = 0;
602 return u64;
603}
604
605
606VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
607{
608 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
609 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
610}
611
612
613VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
614{
615 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
616 return pVCpu->cpum.s.Guest.eip;
617}
618
619
620VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
621{
622 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
623 return pVCpu->cpum.s.Guest.rip;
624}
625
626
627VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
628{
629 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
630 return pVCpu->cpum.s.Guest.eax;
631}
632
633
634VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
635{
636 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
637 return pVCpu->cpum.s.Guest.ebx;
638}
639
640
641VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
642{
643 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
644 return pVCpu->cpum.s.Guest.ecx;
645}
646
647
648VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
649{
650 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
651 return pVCpu->cpum.s.Guest.edx;
652}
653
654
655VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
656{
657 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
658 return pVCpu->cpum.s.Guest.esi;
659}
660
661
662VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
663{
664 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
665 return pVCpu->cpum.s.Guest.edi;
666}
667
668
669VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
670{
671 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
672 return pVCpu->cpum.s.Guest.esp;
673}
674
675
676VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
677{
678 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
679 return pVCpu->cpum.s.Guest.ebp;
680}
681
682
683VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
684{
685 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
686 return pVCpu->cpum.s.Guest.eflags.u;
687}
688
689
690VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
691{
692 switch (iReg)
693 {
694 case DISCREG_CR0:
695 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
696 *pValue = pVCpu->cpum.s.Guest.cr0;
697 break;
698
699 case DISCREG_CR2:
700 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
701 *pValue = pVCpu->cpum.s.Guest.cr2;
702 break;
703
704 case DISCREG_CR3:
705 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
706 *pValue = pVCpu->cpum.s.Guest.cr3;
707 break;
708
709 case DISCREG_CR4:
710 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
711 *pValue = pVCpu->cpum.s.Guest.cr4;
712 break;
713
714 case DISCREG_CR8:
715 {
716 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
717 uint8_t u8Tpr;
718 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
719 if (RT_FAILURE(rc))
720 {
721 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
722 *pValue = 0;
723 return rc;
724 }
725 *pValue = u8Tpr >> 4; /* TPR bits 7-4 contain the task priority, which goes into CR8 bits 3-0. */
726 break;
727 }
728
729 default:
730 return VERR_INVALID_PARAMETER;
731 }
732 return VINF_SUCCESS;
733}
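/*
 * Illustrative sketch (example only, helper name made up): reading CR8 via
 * CPUMGetGuestCRx.  As the CR8 case above shows, the value is the APIC TPR
 * shifted right by four, so a TPR of 0x50 reads back as CR8 = 5.
 */
#if 0
static uint64_t cpumExampleReadCr8(PCVMCPUCC pVCpu)
{
    uint64_t uCr8 = 0;
    int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8);
    if (RT_FAILURE(rc)) /* only fails when there is no APIC instance */
        uCr8 = 0;
    return uCr8;
}
#endif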
734
735
736VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
737{
738 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
739 return pVCpu->cpum.s.Guest.dr[0];
740}
741
742
743VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
744{
745 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
746 return pVCpu->cpum.s.Guest.dr[1];
747}
748
749
750VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
751{
752 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
753 return pVCpu->cpum.s.Guest.dr[2];
754}
755
756
757VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
758{
759 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
760 return pVCpu->cpum.s.Guest.dr[3];
761}
762
763
764VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
765{
766 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
767 return pVCpu->cpum.s.Guest.dr[6];
768}
769
770
771VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
772{
773 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
774 return pVCpu->cpum.s.Guest.dr[7];
775}
776
777
778VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
779{
780 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
781 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
782 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
783 if (iReg == 4 || iReg == 5)
784 iReg += 2;
785 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
786 return VINF_SUCCESS;
787}
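/*
 * Illustrative sketch (example only, helper name made up): because of the
 * DR4/DR5 aliasing handled above, asking for DR4 returns the DR6 value and
 * asking for DR5 returns DR7.
 */
#if 0
static void cpumExampleDrxAliases(PCVMCPU pVCpu)
{
    uint64_t uDr4 = 0, uDr5 = 0;
    CPUMGetGuestDRx(pVCpu, 4, &uDr4);
    CPUMGetGuestDRx(pVCpu, 5, &uDr5);
    Assert(uDr4 == CPUMGetGuestDR6(pVCpu));
    Assert(uDr5 == CPUMGetGuestDR7(pVCpu));
}
#endif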
788
789
790VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
791{
792 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
793 return pVCpu->cpum.s.Guest.msrEFER;
794}
795
796
797/**
798 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
799 *
800 * @returns Pointer to the leaf if found, NULL if not.
801 *
802 * @param pVM The cross context VM structure.
803 * @param uLeaf The leaf to get.
804 */
805PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
806{
807 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
808 if (iEnd)
809 {
810 unsigned iStart = 0;
811 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
812 for (;;)
813 {
814 unsigned i = iStart + (iEnd - iStart) / 2U;
815 if (uLeaf < paLeaves[i].uLeaf)
816 {
817 if (i <= iStart)
818 return NULL;
819 iEnd = i;
820 }
821 else if (uLeaf > paLeaves[i].uLeaf)
822 {
823 i += 1;
824 if (i >= iEnd)
825 return NULL;
826 iStart = i;
827 }
828 else
829 {
830 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
831 return &paLeaves[i];
832
833 /* This shouldn't normally happen. But in case it does due
834 to user configuration overrides or something, just return the
835 first sub-leaf. */
836 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
837 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
838 while ( paLeaves[i].uSubLeaf != 0
839 && i > 0
840 && uLeaf == paLeaves[i - 1].uLeaf)
841 i--;
842 return &paLeaves[i];
843 }
844 }
845 }
846
847 return NULL;
848}
849
850
851/**
852 * Looks up a CPUID leaf in the CPUID leaf array.
853 *
854 * @returns Pointer to the leaf if found, NULL if not.
855 *
856 * @param pVM The cross context VM structure.
857 * @param uLeaf The leaf to get.
858 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
859 * isn't.
860 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
861 */
862PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
863{
864 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
865 if (iEnd)
866 {
867 unsigned iStart = 0;
868 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
869 for (;;)
870 {
871 unsigned i = iStart + (iEnd - iStart) / 2U;
872 if (uLeaf < paLeaves[i].uLeaf)
873 {
874 if (i <= iStart)
875 return NULL;
876 iEnd = i;
877 }
878 else if (uLeaf > paLeaves[i].uLeaf)
879 {
880 i += 1;
881 if (i >= iEnd)
882 return NULL;
883 iStart = i;
884 }
885 else
886 {
887 uSubLeaf &= paLeaves[i].fSubLeafMask;
888 if (uSubLeaf == paLeaves[i].uSubLeaf)
889 *pfExactSubLeafHit = true;
890 else
891 {
892 /* Find the right subleaf. We return the last one before
893 uSubLeaf if we don't find an exact match. */
894 if (uSubLeaf < paLeaves[i].uSubLeaf)
895 while ( i > 0
896 && uLeaf == paLeaves[i - 1].uLeaf
897 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
898 i--;
899 else
900 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
901 && uLeaf == paLeaves[i + 1].uLeaf
902 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
903 i++;
904 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
905 }
906 return &paLeaves[i];
907 }
908 }
909 }
910
911 *pfExactSubLeafHit = false;
912 return NULL;
913}
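/*
 * Illustrative sketch (example only, helper name made up) of how the lookup
 * above is typically consumed: request a specific sub-leaf and fall back to
 * the closest stored one when there is no exact hit, which is what
 * CPUMGetGuestCpuId below relies on.
 */
#if 0
static void cpumExampleLeafLookup(PVM pVM)
{
    bool fExactHit = false;
    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, UINT32_C(0x00000007), 1, &fExactHit);
    if (pLeaf && !fExactHit)
        Log2(("Sub-leaf 1 missing, closest stored sub-leaf is %#x\n", pLeaf->uSubLeaf));
}
#endif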
914
915
916/**
917 * Gets a CPUID leaf.
918 *
919 * @param pVCpu The cross context virtual CPU structure.
920 * @param uLeaf The CPUID leaf to get.
921 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
922 * @param f64BitMode A tristate indicating whether the caller is in 64-bit mode or
923 * not: 1=true, 0=false, -1=whatever. This affects how the
924 * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
925 * Intel CPUs, where it's only returned in 64-bit mode.
926 * @param pEax Where to store the EAX value.
927 * @param pEbx Where to store the EBX value.
928 * @param pEcx Where to store the ECX value.
929 * @param pEdx Where to store the EDX value.
930 */
931VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
932 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
933{
934 bool fExactSubLeafHit;
935 PVM pVM = pVCpu->CTX_SUFF(pVM);
936 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
937 if (pLeaf)
938 {
939 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
940 if (fExactSubLeafHit)
941 {
942 *pEax = pLeaf->uEax;
943 *pEbx = pLeaf->uEbx;
944 *pEcx = pLeaf->uEcx;
945 *pEdx = pLeaf->uEdx;
946
947 /*
948 * Deal with CPU specific information.
949 */
950 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
951 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
952 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
953 {
954 if (uLeaf == 1)
955 {
956 /* EBX: Bits 31-24: Initial APIC ID. */
957 Assert(pVCpu->idCpu <= 255);
958 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
959 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
960
961 /* EDX: Bit 9: AND with APICBASE.EN. */
962 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
963 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
964
965 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
966 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
967 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
968 }
969 else if (uLeaf == 0xb)
970 {
971 /* EDX: Initial extended APIC ID. */
972 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
973 *pEdx = pVCpu->idCpu;
974 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
975 }
976 else if (uLeaf == UINT32_C(0x8000001e))
977 {
978 /* EAX: Initial extended APIC ID. */
979 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
980 *pEax = pVCpu->idCpu;
981 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
982 }
983 else if (uLeaf == UINT32_C(0x80000001))
984 {
985 /* EDX: Bit 9: AND with APICBASE.EN. */
986 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
987 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
988 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
989 }
990 else
991 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
992 }
993
994 /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
995 if ( uLeaf == UINT32_C(0x80000001)
996 && f64BitMode == false
997 && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
998 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
999 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
1000 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
1001 *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1002
1003 }
1004 /*
1005 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate,
1006 * but we do the best we can here...
1007 */
1008 else
1009 {
1010 *pEax = *pEbx = *pEcx = *pEdx = 0;
1011 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1012 {
1013 *pEcx = uSubLeaf & 0xff;
1014 *pEdx = pVCpu->idCpu;
1015 }
1016 }
1017 }
1018 else
1019 {
1020 /*
1021 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1022 */
1023 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1024 {
1025 default:
1026 AssertFailed();
1027 RT_FALL_THRU();
1028 case CPUMUNKNOWNCPUID_DEFAULTS:
1029 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1030 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1031 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1032 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1033 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1034 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1035 break;
1036 case CPUMUNKNOWNCPUID_PASSTHRU:
1037 *pEax = uLeaf;
1038 *pEbx = 0;
1039 *pEcx = uSubLeaf;
1040 *pEdx = 0;
1041 break;
1042 }
1043 }
1044 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1045}
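/*
 * Illustrative sketch (example only, helper name made up): querying leaf 1
 * via CPUMGetGuestCpuId and extracting the initial APIC ID that the code
 * above patches into EBX bits 31-24.  Passing -1 for f64BitMode means
 * "don't care".
 */
#if 0
static uint8_t cpumExampleInitialApicId(PVMCPUCC pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1, 0, -1 /*f64BitMode*/, &uEax, &uEbx, &uEcx, &uEdx);
    return (uint8_t)(uEbx >> 24); /* equals pVCpu->idCpu */
}
#endif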
1046
1047
1048/**
1049 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1050 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1051 *
1052 * @returns Previous value.
1053 * @param pVCpu The cross context virtual CPU structure to make the
1054 * change on. Usually the calling EMT.
1055 * @param fVisible Whether to make it visible (true) or hide it (false).
1056 *
1057 * @remarks This is "VMMDECL" so that it still links with
1058 * the old APIC code which is in VBoxDD2 and not in
1059 * the VMM module.
1060 */
1061VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1062{
1063 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1064 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1065 return fOld;
1066}
1067
1068
1069/**
1070 * Gets the host CPU vendor.
1071 *
1072 * @returns CPU vendor.
1073 * @param pVM The cross context VM structure.
1074 */
1075VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1076{
1077 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1078}
1079
1080
1081/**
1082 * Gets the host CPU microarchitecture.
1083 *
1084 * @returns CPU microarchitecture.
1085 * @param pVM The cross context VM structure.
1086 */
1087VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1088{
1089 return pVM->cpum.s.HostFeatures.enmMicroarch;
1090}
1091
1092
1093/**
1094 * Gets the guest CPU vendor.
1095 *
1096 * @returns CPU vendor.
1097 * @param pVM The cross context VM structure.
1098 */
1099VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1100{
1101 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1102}
1103
1104
1105/**
1106 * Gets the guest CPU architecture.
1107 *
1108 * @returns CPU architecture.
1109 * @param pVM The cross context VM structure.
1110 */
1111VMMDECL(CPUMARCH) CPUMGetGuestArch(PCVM pVM)
1112{
1113 RT_NOREF(pVM);
1114 return kCpumArch_X86; /* Static as we are in the x86 VMM module here. */
1115}
1116
1117
1118/**
1119 * Gets the guest CPU microarchitecture.
1120 *
1121 * @returns CPU microarchitecture.
1122 * @param pVM The cross context VM structure.
1123 */
1124VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1125{
1126 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1127}
1128
1129
1130/**
1131 * Gets the maximum number of physical and linear address bits supported by the
1132 * guest.
1133 *
1134 * @param pVM The cross context VM structure.
1135 * @param pcPhysAddrWidth Where to store the physical address width.
1136 * @param pcLinearAddrWidth Where to store the linear address width.
1137 */
1138VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1139{
1140 AssertPtr(pVM);
1141 AssertReturnVoid(pcPhysAddrWidth);
1142 AssertReturnVoid(pcLinearAddrWidth);
1143 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1144 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1145}
1146
1147
1148VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1149{
1150 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1151 return CPUMRecalcHyperDRx(pVCpu, 0);
1152}
1153
1154
1155VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1156{
1157 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1158 return CPUMRecalcHyperDRx(pVCpu, 1);
1159}
1160
1161
1162VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1163{
1164 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1165 return CPUMRecalcHyperDRx(pVCpu, 2);
1166}
1167
1168
1169VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1170{
1171 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1172 return CPUMRecalcHyperDRx(pVCpu, 3);
1173}
1174
1175
1176VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1177{
1178 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1179 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1180 return VINF_SUCCESS; /* No need to recalc. */
1181}
1182
1183
1184VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1185{
1186 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1187 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1188 return CPUMRecalcHyperDRx(pVCpu, 7);
1189}
1190
1191
1192VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1193{
1194 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1195 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1196 if (iReg == 4 || iReg == 5)
1197 iReg += 2;
1198 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1199 return CPUMRecalcHyperDRx(pVCpu, iReg);
1200}
1201
1202
1203/**
1204 * Recalculates the hypervisor DRx register values based on current guest
1205 * registers and DBGF breakpoints, updating changed registers depending on the
1206 * context.
1207 *
1208 * This is called whenever a guest DRx register is modified (any context) and
1209 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1210 *
1211 * In raw-mode context this function will reload any (hyper) DRx registers which
1212 * come out with a different value. It may also have to save the host debug
1213 * registers if that hasn't been done already. In this context though, we'll
1214 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1215 * are only important when breakpoints are actually enabled.
1216 *
1217 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1218 * reloaded by the HM code if it changes. Furthermore, we will only use the
1219 * combined register set when the VBox debugger is actually using hardware BPs;
1220 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1221 * concern us here).
1222 *
1223 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1224 * all the time.
1225 *
1226 * @returns VINF_SUCCESS.
1227 * @param pVCpu The cross context virtual CPU structure.
1228 * @param iGstReg The guest debug register number that was modified.
1229 * UINT8_MAX if not a guest register.
1230 */
1231VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
1232{
1233 PVM pVM = pVCpu->CTX_SUFF(pVM);
1234#ifndef IN_RING0
1235 RT_NOREF_PV(iGstReg);
1236#endif
1237
1238 /*
1239 * Compare the DR7s first.
1240 *
1241 * We only care about the enabled flags. GD is virtualized when we
1242 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1243 * always have the LE and GE bits set, so no need to check and disable
1244 * stuff if they're cleared like we have to for the guest DR7.
1245 */
1246 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1247 if (uDbgfDr7 & X86_DR7_ENABLED_MASK)
1248 {
1249 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1250
1251 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1252 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1253 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1254 uGstDr7 = 0;
1255 else if (!(uGstDr7 & X86_DR7_LE))
1256 uGstDr7 &= ~X86_DR7_LE_ALL;
1257 else if (!(uGstDr7 & X86_DR7_GE))
1258 uGstDr7 &= ~X86_DR7_GE_ALL;
1259
1260 /*
1261 * Ok, something is enabled. Recalc each of the breakpoints, taking
1262 * the VM debugger ones over the guest ones. In raw-mode context we will
1263 * not allow breakpoints with values inside the hypervisor area.
1264 */
1265 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1266
1267 /* bp 0 */
1268 RTGCUINTREG uNewDr0;
1269 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1270 {
1271 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1272 uNewDr0 = DBGFBpGetDR0(pVM);
1273 }
1274 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1275 {
1276 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1277 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1278 }
1279 else
1280 uNewDr0 = 0;
1281
1282 /* bp 1 */
1283 RTGCUINTREG uNewDr1;
1284 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1285 {
1286 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1287 uNewDr1 = DBGFBpGetDR1(pVM);
1288 }
1289 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1290 {
1291 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1292 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1293 }
1294 else
1295 uNewDr1 = 0;
1296
1297 /* bp 2 */
1298 RTGCUINTREG uNewDr2;
1299 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1300 {
1301 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1302 uNewDr2 = DBGFBpGetDR2(pVM);
1303 }
1304 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1305 {
1306 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1307 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1308 }
1309 else
1310 uNewDr2 = 0;
1311
1312 /* bp 3 */
1313 RTGCUINTREG uNewDr3;
1314 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1315 {
1316 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1317 uNewDr3 = DBGFBpGetDR3(pVM);
1318 }
1319 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1320 {
1321 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1322 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1323 }
1324 else
1325 uNewDr3 = 0;
1326
1327 /*
1328 * Apply the updates.
1329 */
1330 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1331 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1332 cpumSetHyperDR3(pVCpu, uNewDr3);
1333 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1334 cpumSetHyperDR2(pVCpu, uNewDr2);
1335 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1336 cpumSetHyperDR1(pVCpu, uNewDr1);
1337 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1338 cpumSetHyperDR0(pVCpu, uNewDr0);
1339 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1340 CPUMSetHyperDR7(pVCpu, uNewDr7);
1341 }
1342#ifdef IN_RING0
1343 else if (CPUMIsGuestDebugStateActive(pVCpu))
1344 {
1345 /*
1346 * Reload the register that was modified. Normally this won't happen
1347 * as we won't intercept DRx writes when not having the hyper debug
1348 * state loaded, but in case we do for some reason we'll simply deal
1349 * with it.
1350 */
1351 switch (iGstReg)
1352 {
1353 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1354 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1355 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1356 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1357 default:
1358 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1359 }
1360 }
1361#endif
1362 else
1363 {
1364 /*
1365 * No active debug state any more. In raw-mode this means we have to
1366 * make sure DR7 has everything disabled now, if we armed it already.
1367 * In ring-0 we might end up here when just single stepping.
1368 */
1369#ifdef IN_RING0
1370 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1371 {
1372 if (pVCpu->cpum.s.Hyper.dr[0])
1373 ASMSetDR0(0);
1374 if (pVCpu->cpum.s.Hyper.dr[1])
1375 ASMSetDR1(0);
1376 if (pVCpu->cpum.s.Hyper.dr[2])
1377 ASMSetDR2(0);
1378 if (pVCpu->cpum.s.Hyper.dr[3])
1379 ASMSetDR3(0);
1380 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1381 }
1382#endif
1383 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1384
1385 /* Clear all the registers. */
1386 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1387 pVCpu->cpum.s.Hyper.dr[3] = 0;
1388 pVCpu->cpum.s.Hyper.dr[2] = 0;
1389 pVCpu->cpum.s.Hyper.dr[1] = 0;
1390 pVCpu->cpum.s.Hyper.dr[0] = 0;
1391
1392 }
1393 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1394 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1395 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1396 pVCpu->cpum.s.Hyper.dr[7]));
1397
1398 return VINF_SUCCESS;
1399}
1400
1401
1402/**
1403 * Set the guest XCR0 register.
1404 *
1405 * Will load additional state if the FPU state is already loaded (in ring-0 &
1406 * raw-mode context).
1407 *
1408 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1409 * value.
1410 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1411 * @param uNewValue The new value.
1412 * @thread EMT(pVCpu)
1413 */
1414VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1415{
1416 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1417 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1418 /* The X87 bit cannot be cleared. */
1419 && (uNewValue & XSAVE_C_X87)
1420 /* AVX requires SSE. */
1421 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1422 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1423 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1424 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1425 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1426 )
1427 {
1428 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1429
1430 /* If more state components are enabled, we need to take care to load
1431 them if the FPU/SSE state is already loaded. Otherwise we may leak
1432 host state to the guest. */
1433 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1434 if (fNewComponents)
1435 {
1436#ifdef IN_RING0
1437 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1438 {
1439 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1440 /* Adding more components. */
1441 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
1442 else
1443 {
1444 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1445 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1446 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1447 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1448 }
1449 }
1450#endif
1451 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1452 }
1453 return VINF_SUCCESS;
1454 }
1455 return VERR_CPUM_RAISE_GP_0;
1456}
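/*
 * Illustrative sketch (example only, helper name made up) of the XCR0
 * validation rules enforced above: X87 must always be set and YMM requires
 * SSE, otherwise the function returns VERR_CPUM_RAISE_GP_0 for the caller to
 * turn into a #GP(0).
 */
#if 0
static void cpumExampleXcr0Rules(PVMCPUCC pVCpu)
{
    /* Accepted (provided the VM's fXStateGuestMask permits AVX). */
    int rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);
    NOREF(rc);
    /* Rejected: YMM without SSE. */
    rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);
    Assert(rc == VERR_CPUM_RAISE_GP_0);
}
#endif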
1457
1458
1459/**
1460 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1461 *
1462 * @returns true if NX (no-execute) is enabled, otherwise false.
1463 * @param pVCpu The cross context virtual CPU structure.
1464 */
1465VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1466{
1467 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1468 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1469}
1470
1471
1472/**
1473 * Tests if the guest has the Page Size Extension enabled (PSE).
1474 *
1475 * @returns true if PSE is enabled, otherwise false.
1476 * @param pVCpu The cross context virtual CPU structure.
1477 */
1478VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1479{
1480 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1481 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1482 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1483}
1484
1485
1486/**
1487 * Tests if the guest has paging enabled (PG).
1488 *
1489 * @returns true if paging is enabled, otherwise false.
1490 * @param pVCpu The cross context virtual CPU structure.
1491 */
1492VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1493{
1494 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1495 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1496}
1497
1498
1499/**
1500 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1501 *
1502 * @returns true if write protection is enabled, otherwise false.
1503 * @param pVCpu The cross context virtual CPU structure.
1504 */
1505VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1506{
1507 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1508 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1509}
1510
1511
1512/**
1513 * Tests if the guest is running in real mode or not.
1514 *
1515 * @returns true if in real mode, otherwise false.
1516 * @param pVCpu The cross context virtual CPU structure.
1517 */
1518VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1519{
1520 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1521 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1522}
1523
1524
1525/**
1526 * Tests if the guest is running in real or virtual 8086 mode.
1527 *
1528 * @returns @c true if it is, @c false if not.
1529 * @param pVCpu The cross context virtual CPU structure.
1530 */
1531VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1532{
1533 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1534 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1535 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1536}
1537
1538
1539/**
1540 * Tests if the guest is running in protected mode or not.
1541 *
1542 * @returns true if in protected mode, otherwise false.
1543 * @param pVCpu The cross context virtual CPU structure.
1544 */
1545VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1546{
1547 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1548 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1549}
1550
1551
1552/**
1553 * Tests if the guest is running in paged protected mode or not.
1554 *
1555 * @returns true if in paged protected mode, otherwise false.
1556 * @param pVCpu The cross context virtual CPU structure.
1557 */
1558VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1559{
1560 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1561 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1562}
1563
1564
1565/**
1566 * Tests if the guest is running in long mode or not.
1567 *
1568 * @returns true if in long mode, otherwise false.
1569 * @param pVCpu The cross context virtual CPU structure.
1570 */
1571VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1572{
1573 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1574 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1575}
1576
1577
1578/**
1579 * Tests if the guest is running in PAE mode or not.
1580 *
1581 * @returns true if in PAE mode, otherwise false.
1582 * @param pVCpu The cross context virtual CPU structure.
1583 */
1584VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1585{
1586 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1587 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1588 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1589 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1590 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1591 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1592}
1593
1594
1595/**
1596 * Tests if the guest is running in 64-bit mode or not.
1597 *
1598 * @returns true if in 64-bit protected mode, otherwise false.
1599 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1600 */
1601VMMDECL(bool) CPUMIsGuestIn64BitCode(PCVMCPU pVCpu)
1602{
1603 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1604 if (!CPUMIsGuestInLongMode(pVCpu))
1605 return false;
1606 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1607 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1608}
1609
1610
1611/**
1612 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1613 * registers.
1614 *
1615 * @returns true if in 64-bit protected mode, otherwise false.
1616 * @param pCtx Pointer to the current guest CPU context.
1617 */
1618VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCCPUMCTX pCtx)
1619{
1620 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1621}
1622
1623
1624/**
1625 * Sets the specified changed flags (CPUM_CHANGED_*).
1626 *
1627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1628 * @param fChangedAdd The changed flags to add.
1629 */
1630VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1631{
1632 pVCpu->cpum.s.fChanged |= fChangedAdd;
1633}
1634
1635
1636/**
1637 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1638 *
1639 * @returns true if supported.
1640 * @returns false if not supported.
1641 * @param pVM The cross context VM structure.
1642 */
1643VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1644{
1645 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1646}
1647
1648
1649/**
1650 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1651 * @returns true if used.
1652 * @returns false if not used.
1653 * @param pVM The cross context VM structure.
1654 */
1655VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1656{
1657 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1658}
1659
1660
1661/**
1662 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1663 * @returns true if used.
1664 * @returns false if not used.
1665 * @param pVM The cross context VM structure.
1666 */
1667VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1668{
1669 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1670}
1671
1672
1673/**
1674 * Checks if we activated the FPU/XMM state of the guest OS.
1675 *
1676 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1677 * the next time we'll be executing guest code, so it may return true for
1678 * 64-on-32 when we still haven't actually loaded the FPU state, just scheduled
1679 * it to be loaded the next time we go through the world switcher
1680 * (CPUM_SYNC_FPU_STATE).
1681 *
1682 * @returns true / false.
1683 * @param pVCpu The cross context virtual CPU structure.
1684 */
1685VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1686{
1687 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1688 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1689 return fRet;
1690}
1691
1692
1693/**
1694 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1695 *
1696 * @returns true / false.
1697 * @param pVCpu The cross context virtual CPU structure.
1698 */
1699VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1700{
1701 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1702 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1703 return fRet;
1704}
1705
1706
1707/**
1708 * Checks if we saved the FPU/XMM state of the host OS.
1709 *
1710 * @returns true / false.
1711 * @param pVCpu The cross context virtual CPU structure.
1712 */
1713VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1714{
1715 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1716}
1717
1718
1719/**
1720 * Checks if the guest debug state is active.
1721 *
1722 * @returns boolean
1723 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1724 */
1725VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1726{
1727 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1728}
1729
1730
1731/**
1732 * Checks if the hyper debug state is active.
1733 *
1734 * @returns boolean
1735 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1736 */
1737VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1738{
1739 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1740}
1741
1742
1743/**
1744 * Mark the guest's debug state as inactive.
1745 *
1746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1747 * @todo This API doesn't make sense any more.
1748 */
1749VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1750{
1751 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1752 NOREF(pVCpu);
1753}
1754
1755
1756/**
1757 * Get the current privilege level of the guest.
1758 *
1759 * @returns CPL
1760 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1761 */
1762VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1763{
1764 /*
1765 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1766 *
1767 * Note! We used to check CS.DPL here, assuming it was always equal to
1768 * CPL even if a conforming segment was loaded. But this turned out to
1769 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1770 * during install after a far call to ring 2 with VT-x. Then on newer
1771 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1772 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1773 *
1774 * So, forget CS.DPL, always use SS.DPL.
1775 *
1776 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1777 * isn't necessarily equal if the segment is conforming.
1778 * See section 4.11.1 in the AMD manual.
1779 *
1780 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1781 * right after real->prot mode switch and when in V8086 mode? That
1782 * section says the RPL specified in a direct transfer (call, jmp,
1783 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1784 * it would be impossible for an exception handler or the iret
1785 * instruction to figure out whether SS:ESP are part of the frame
1786 * or not. A VBox or qemu bug must've led to this misconception.
1787 *
1788 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1789 * selector into SS with an RPL other than the CPL when CPL != 3 and
1790 * we're in 64-bit mode. The Intel dev box doesn't allow this; it insists
1791 * on RPL = CPL. Weird.
1792 */
1793 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1794 uint32_t uCpl;
1795 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1796 {
1797 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1798 {
1799 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1800 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1801 else
1802 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1803 }
1804 else
1805 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1806 }
1807 else
1808 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1809 return uCpl;
1810}
1811
1812
1813/**
1814 * Gets the current guest CPU mode.
1815 *
1816 * If paging mode is what you need, check out PGMGetGuestMode().
1817 *
1818 * @returns The CPU mode.
1819 * @param pVCpu The cross context virtual CPU structure.
1820 */
1821VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1822{
1823 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1824 CPUMMODE enmMode;
1825 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1826 enmMode = CPUMMODE_REAL;
1827 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1828 enmMode = CPUMMODE_PROTECTED;
1829 else
1830 enmMode = CPUMMODE_LONG;
1831
1832 return enmMode;
1833}
1834
1835
1836/**
1837 * Figures out whether the CPU is currently executing 16-, 32- or 64-bit code.
1838 *
1839 * @returns 16, 32 or 64.
1840 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1841 */
1842VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1843{
1844 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1845
1846 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1847 return 16;
1848
1849 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1850 {
1851 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1852 return 16;
1853 }
1854
1855 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1856 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1857 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1858 return 64;
1859
1860 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1861 return 32;
1862
1863 return 16;
1864}
1865
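/*
 * Editor's note: illustrative sketch only, not part of the original sources.
 * It shows, with made-up names, how the default code size falls out of CR0.PE,
 * EFLAGS.VM, EFER.LMA and the CS.L/CS.D attribute bits, mirroring the decision
 * order in CPUMGetGuestCodeBits above.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static uint32_t exampleCodeBits(bool fProtMode, bool fV86Mode, bool fLongModeActive,
 *                                   bool fCsLong, bool fCsDefBig)
 *   {
 *       if (!fProtMode || fV86Mode)
 *           return 16;                     // real and virtual-8086 mode
 *       if (fCsLong && fLongModeActive)
 *           return 64;                     // 64-bit code segment in long mode
 *       return fCsDefBig ? 32 : 16;        // protected / compatibility mode
 *   }
 * @endcode
 */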
1866
1867VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1868{
1869 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1870
1871 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1872 return DISCPUMODE_16BIT;
1873
1874 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1875 {
1876 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1877 return DISCPUMODE_16BIT;
1878 }
1879
1880 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1881 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1882 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1883 return DISCPUMODE_64BIT;
1884
1885 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1886 return DISCPUMODE_32BIT;
1887
1888 return DISCPUMODE_16BIT;
1889}
1890
1891
1892/**
1893 * Gets the guest MXCSR_MASK value.
1894 *
1895 * This does not access the x87 state, but the value we determined at VM
1896 * initialization.
1897 *
1898 * @returns MXCSR mask.
1899 * @param pVM The cross context VM structure.
1900 */
1901VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1902{
1903 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1904}
1905
1906
1907/**
1908 * Returns whether the guest has physical interrupts enabled.
1909 *
1910 * @returns @c true if interrupts are enabled, @c false otherwise.
1911 * @param pVCpu The cross context virtual CPU structure.
1912 *
1913 * @remarks Warning! This function does -not- take into account the global-interrupt
1914 * flag (GIF).
1915 */
1916VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1917{
1918 switch (CPUMGetGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1919 {
1920 case CPUMHWVIRT_NONE:
1921 default:
1922 return pVCpu->cpum.s.Guest.eflags.Bits.u1IF;
1923 case CPUMHWVIRT_VMX:
1924 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
1925 case CPUMHWVIRT_SVM:
1926 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1927 }
1928}
1929
1930
1931/**
1932 * Returns whether the nested-guest has virtual interrupts enabled.
1933 *
1934 * @returns @c true if interrupts are enabled, @c false otherwise.
1935 * @param pVCpu The cross context virtual CPU structure.
1936 *
1937 * @remarks Warning! This function does -not- take into account the global-interrupt
1938 * flag (GIF).
1939 */
1940VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1941{
1942 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1943 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
1944
1945 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1946 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
1947
1948 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1949 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
1950}
1951
1952
1953/**
1954 * Calculates the interruptiblity of the guest.
1955 *
1956 * @returns Interruptibility level.
1957 * @param pVCpu The cross context virtual CPU structure.
1958 */
1959VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1960{
1961#if 1
1962 /* Global-interrupt flag blocks pretty much everything we care about here. */
1963 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1964 {
1965 /*
1966 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1967 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1968 * or raw-mode). Hence we use the function below which handles the details.
1969 */
1970 if ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_ALL_MASK)
1971 || ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI)
1972 && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
1973 {
1974 /** @todo OPT: this next call should be inlined! */
1975 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
1976 {
1977 /** @todo OPT: type this out as it repeats tests. */
1978 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1979 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1980 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1981
1982 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1983 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1984 }
1985 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1986 }
1987
1988 /*
1989 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1990 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1991 * However, there is some uncertainty regarding the converse, i.e. whether
1992 * NMI-blocking until IRET blocks delivery of physical interrupts.
1993 *
1994 * See Intel spec. 25.4.1 "Event Blocking".
1995 */
1996 /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
1997 * 25.4.1 is only applicable to VMX non-root mode. In root mode /
1998 * non-VMX mode, I have not seen any evidence in the intel manuals that
1999 * NMIs are not blocked when in an interrupt shadow. Section "6.7
2000 * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
2001 */
2002 if (!(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI))
2003 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2004 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2005 }
2006 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2007#else
2008 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
2009 {
2010 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2011 {
2012 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2013 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2014
2015 /** @todo does blocking NMIs mean interrupts are also inhibited? */
2016 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2017 {
2018 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2019 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2020 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2021 }
2022 AssertFailed();
2023 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2024 }
2025 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2026 }
2027 else
2028 {
2029 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2030 {
2031 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2032 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2033 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2034 }
2035 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2036 }
2037#endif
2038}
2039
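/*
 * Editor's note: simplified sketch only, not part of the original sources.
 * It spells out the precedence used above with boolean inputs standing in for
 * the real state: GIF first, then the interrupt/NMI inhibit state, then the
 * physical interrupt gate and (for nested guests) the virtual interrupt gate.
 * All names are made up; the RIP check on the interrupt shadow is omitted.
 * @code
 *   #include <stdbool.h>
 *
 *   typedef enum { UNRESTRAINED, VIRT_INT_DISABLED, INT_DISABLED,
 *                  INT_INHIBITED, NMI_INHIBIT, GLOBAL_INHIBIT } EXAMPLEINTRLEVEL;
 *
 *   static EXAMPLEINTRLEVEL exampleInterruptibility(bool fGif, bool fIntShadow, bool fNmiInhibit,
 *                                                   bool fPhysIntrEnabled, bool fNestedGuest,
 *                                                   bool fVirtIntrEnabled)
 *   {
 *       if (!fGif)
 *           return GLOBAL_INHIBIT;                 // GIF clear blocks everything
 *       if (!fIntShadow && !fNmiInhibit)
 *       {
 *           if (!fPhysIntrEnabled)
 *               return INT_DISABLED;               // IF clear (or nested-guest equivalent)
 *           if (fNestedGuest && !fVirtIntrEnabled)
 *               return VIRT_INT_DISABLED;          // physical OK, virtual gate closed
 *           return UNRESTRAINED;
 *       }
 *       return fNmiInhibit ? NMI_INHIBIT : INT_INHIBITED;
 *   }
 * @endcode
 */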
2040
2041/**
2042 * Checks whether the SVM nested-guest has physical interrupts enabled.
2043 *
2044 * @returns true if interrupts are enabled, false otherwise.
2045 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2046 * @param pCtx The guest-CPU context.
2047 *
2048 * @remarks This does -not- take into account the global-interrupt flag.
2049 */
2050VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2051{
2052 /** @todo Optimization: Avoid this function call and use a pointer to the
2053 * relevant eflags instead (set up during VMRUN instruction emulation). */
2054 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2055
2056 X86EFLAGS fEFlags;
2057 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2058 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2059 else
2060 fEFlags.u = pCtx->eflags.u;
2061
2062 return fEFlags.Bits.u1IF;
2063}
2064
2065
2066/**
2067 * Checks whether the SVM nested-guest is in a state to receive virtual (setup
2068 * for injection by VMRUN instruction) interrupts.
2069 *
2070 * @returns @c true if the nested-guest is ready to receive virtual
2071 * interrupts, @c false otherwise.
2072 *
2073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2074 * @param pCtx The guest-CPU context.
2075 */
2076VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2077{
2078 RT_NOREF(pVCpu);
2079 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2080
2081 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
2082 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2083 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2084 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2085 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2086 return false;
2087
2088 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2089}
2090
2091
2092/**
2093 * Gets the pending SVM nested-guest interrupt vector.
2094 *
2095 * @returns The nested-guest interrupt to inject.
2096 * @param pCtx The guest-CPU context.
2097 */
2098VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2099{
2100 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
2101}
2102
2103
2104/**
2105 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2106 *
2107 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2108 * @param pCtx The guest-CPU context.
2109 */
2110VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2111{
2112 /*
2113 * Reload the guest's "host state".
2114 */
2115 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2116 pCtx->es = pHostState->es;
2117 pCtx->cs = pHostState->cs;
2118 pCtx->ss = pHostState->ss;
2119 pCtx->ds = pHostState->ds;
2120 pCtx->gdtr = pHostState->gdtr;
2121 pCtx->idtr = pHostState->idtr;
2122 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2123 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2124 pCtx->cr3 = pHostState->uCr3;
2125 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2126 pCtx->rflags.u = pHostState->rflags.u;
2127 pCtx->rflags.Bits.u1VM = 0;
2128 pCtx->rip = pHostState->uRip;
2129 pCtx->rsp = pHostState->uRsp;
2130 pCtx->rax = pHostState->uRax;
2131 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2132 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2133 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2134
2135 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2136 * raise \#GP(0) in the guest. */
2137
2138 /** @todo check the loaded host-state for consistency. Figure out what
2139 * exactly this involves? */
2140}
2141
2142
2143/**
2144 * Saves the host-state to the host-state save area as part of a VMRUN.
2145 *
2146 * @param pCtx The guest-CPU context.
2147 * @param cbInstr The length of the VMRUN instruction in bytes.
2148 */
2149VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2150{
2151 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2152 pHostState->es = pCtx->es;
2153 pHostState->cs = pCtx->cs;
2154 pHostState->ss = pCtx->ss;
2155 pHostState->ds = pCtx->ds;
2156 pHostState->gdtr = pCtx->gdtr;
2157 pHostState->idtr = pCtx->idtr;
2158 pHostState->uEferMsr = pCtx->msrEFER;
2159 pHostState->uCr0 = pCtx->cr0;
2160 pHostState->uCr3 = pCtx->cr3;
2161 pHostState->uCr4 = pCtx->cr4;
2162 pHostState->rflags.u = pCtx->rflags.u;
2163 pHostState->uRip = pCtx->rip + cbInstr;
2164 pHostState->uRsp = pCtx->rsp;
2165 pHostState->uRax = pCtx->rax;
2166}
2167
2168
2169/**
2170 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2171 * nested-guest.
2172 *
2173 * @returns The TSC offset after applying any nested-guest TSC offset.
2174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2175 * @param uTscValue The guest TSC.
2176 *
2177 * @sa CPUMRemoveNestedGuestTscOffset.
2178 */
2179VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2180{
2181 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2182 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2183 {
2184 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2185 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2186 return uTscValue;
2187 }
2188
2189 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2190 {
2191 uint64_t offTsc;
2192 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2193 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2194 return uTscValue + offTsc;
2195 }
2196 return uTscValue;
2197}
2198
2199
2200/**
2201 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2202 * guest.
2203 *
2204 * @returns The TSC offset after removing any nested-guest TSC offset.
2205 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2206 * @param uTscValue The nested-guest TSC.
2207 *
2208 * @sa CPUMApplyNestedGuestTscOffset.
2209 */
2210VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2211{
2212 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2213 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2214 {
2215 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2216 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2217 return uTscValue;
2218 }
2219
2220 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2221 {
2222 uint64_t offTsc;
2223 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2224 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2225 return uTscValue - offTsc;
2226 }
2227 return uTscValue;
2228}
2229
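/*
 * Editor's note: illustrative example only, not part of the original sources.
 * The two helpers above are inverses: the nested-guest TSC is the guest TSC
 * plus the offset taken from the active VMCS/VMCB, so converting back subtracts
 * the very same offset. A quick worked example with made-up numbers:
 * @code
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       uint64_t const uGuestTsc  = UINT64_C(0x0000001000000000);
 *       uint64_t const offNested  = UINT64_C(0xffffffffffff0000); // "negative" offsets simply wrap
 *       uint64_t const uNestedTsc = uGuestTsc + offNested;        // apply, cf. CPUMApplyNestedGuestTscOffset
 *       uint64_t const uRoundTrip = uNestedTsc - offNested;       // remove, cf. CPUMRemoveNestedGuestTscOffset
 *       printf("nested=%#llx roundtrip=%#llx\n",
 *              (unsigned long long)uNestedTsc, (unsigned long long)uRoundTrip);
 *       return uRoundTrip == uGuestTsc ? 0 : 1;                   // always 0: modular arithmetic round-trips
 *   }
 * @endcode
 */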
2230
2231/**
2232 * Dynamically imports state residing in NEM or HM.
2233 *
2234 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2235 *
2236 * @returns VBox status code.
2237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2238 * @param fExtrnImport The fields to import.
2239 * @thread EMT(pVCpu)
2240 */
2241VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2242{
2243 VMCPU_ASSERT_EMT(pVCpu);
2244 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2245 {
2246 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2247 {
2248 case CPUMCTX_EXTRN_KEEPER_NEM:
2249 {
2250 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2251 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2252 return rc;
2253 }
2254
2255 case CPUMCTX_EXTRN_KEEPER_HM:
2256 {
2257#ifdef IN_RING0
2258 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2259 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2260 return rc;
2261#else
2262 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2263 return VINF_SUCCESS;
2264#endif
2265 }
2266 default:
2267 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2268 }
2269 }
2270 return VINF_SUCCESS;
2271}
2272
2273
2274/**
2275 * Gets valid CR4 bits for the guest.
2276 *
2277 * @returns Valid CR4 bits.
2278 * @param pVM The cross context VM structure.
2279 */
2280VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2281{
2282 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2283 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2284 | X86_CR4_TSD | X86_CR4_DE
2285 | X86_CR4_MCE | X86_CR4_PCE;
2286 if (pGuestFeatures->fPae)
2287 fMask |= X86_CR4_PAE;
2288 if (pGuestFeatures->fPge)
2289 fMask |= X86_CR4_PGE;
2290 if (pGuestFeatures->fPse)
2291 fMask |= X86_CR4_PSE;
2292 if (pGuestFeatures->fFxSaveRstor)
2293 fMask |= X86_CR4_OSFXSR;
2294 if (pGuestFeatures->fVmx)
2295 fMask |= X86_CR4_VMXE;
2296 if (pGuestFeatures->fXSaveRstor)
2297 fMask |= X86_CR4_OSXSAVE;
2298 if (pGuestFeatures->fPcid)
2299 fMask |= X86_CR4_PCIDE;
2300 if (pGuestFeatures->fFsGsBase)
2301 fMask |= X86_CR4_FSGSBASE;
2302 if (pGuestFeatures->fSse)
2303 fMask |= X86_CR4_OSXMMEEXCPT;
2304 return fMask;
2305}
2306
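/*
 * Editor's note: illustrative sketch only, not part of the original sources.
 * A typical use of a valid-bits mask like the one above is rejecting guest CR4
 * writes that set unsupported bits; the names below are made up.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   // Returns true if the CR4 write is acceptable, false if it should raise #GP(0).
 *   static bool exampleIsCr4WriteValid(uint64_t uNewCr4, uint64_t fValidMask)
 *   {
 *       return (uNewCr4 & ~fValidMask) == 0;   // any bit outside the mask is reserved/unsupported
 *   }
 * @endcode
 */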
2307
2308/**
2309 * Sets the PAE PDPEs for the guest.
2310 *
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param paPaePdpes The PAE PDPEs to set.
2313 */
2314VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2315{
2316 Assert(paPaePdpes);
2317 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2318 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2319 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2320}
2321
2322
2323/**
2324 * Gets the PAE PDPEs for the guest.
2325 *
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 * @param paPaePdpes Where to store the PAE PDPEs.
2328 */
2329VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2330{
2331 Assert(paPaePdpes);
2332 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2333 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2334 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2335}
2336
2337
2338/**
2339 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2340 *
2341 * @returns VBox status code.
2342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2343 * @param uTimer The VMCS preemption timer value.
2344 * @param cShift The VMX-preemption timer shift (usually based on guest
2345 * VMX MSR rate).
2346 * @param pu64EntryTick Where to store the current tick when the timer is
2347 * programmed.
2348 * @thread EMT(pVCpu)
2349 */
2350VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2351{
2352 Assert(uTimer);
2353 Assert(cShift <= 31);
2354 Assert(pu64EntryTick);
2355 VMCPU_ASSERT_EMT(pVCpu);
2356 uint64_t const cTicksToNext = uTimer << cShift;
2357 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
2358}
2359
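/*
 * Editor's note: illustrative sketch only, not part of the original sources.
 * The expiry is the 32-bit timer value scaled by the rate shift (the value the
 * guest reads from bits 4:0 of the VMX MISC MSR), i.e. one preemption-timer
 * tick per 2^cShift TSC ticks:
 * @code
 *   #include <stdint.h>
 *
 *   static uint64_t exampleVmxPreemptTimerTicks(uint32_t uTimer, uint8_t cShift)
 *   {
 *       return (uint64_t)uTimer << cShift;   // e.g. uTimer=1000, cShift=5 -> 32000 TSC ticks
 *   }
 * @endcode
 */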
2360
2361/**
2362 * Stops the VMX-preemption timer from firing.
2363 *
2364 * @returns VBox status code.
2365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2366 * @thread EMT.
2367 *
2368 * @remarks This can be called during VM reset, so we cannot assume it will be on
2369 * the EMT corresponding to @c pVCpu.
2370 */
2371VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2372{
2373 /*
2374 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
2375 * However, we still get called during CPUMR3Init() and hence we need to check if we have
2376 * a valid timer object before trying to stop it.
2377 */
2378 int rc;
2379 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2380 if (hTimer != NIL_TMTIMERHANDLE)
2381 {
2382 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2383 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2384 if (rc == VINF_SUCCESS)
2385 {
2386 if (TMTimerIsActive(pVM, hTimer))
2387 TMTimerStop(pVM, hTimer);
2388 TMTimerUnlock(pVM, hTimer);
2389 }
2390 }
2391 else
2392 rc = VERR_NOT_FOUND;
2393 return rc;
2394}
2395
2396
2397/**
2398 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2399 *
2400 * @returns VMXMSRPM_XXX - the MSR permission.
2401 * @param pvMsrBitmap Pointer to the MSR bitmap.
2402 * @param idMsr The MSR to get permissions for.
2403 *
2404 * @sa hmR0VmxSetMsrPermission.
2405 */
2406VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2407{
2408 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2409
2410 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2411
2412 /*
2413 * MSR Layout:
2414 * Byte index MSR range Interpreted as
2415 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2416 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2417 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2418 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2419 *
2420 * A bit corresponding to an MSR within the above range causes a VM-exit
2421 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2422 * the covered ranges, it always causes a VM-exit.
2423 *
2424 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2425 */
2426 uint32_t const offBitmapRead = 0;
2427 uint32_t const offBitmapWrite = 0x800;
2428 uint32_t offMsr;
2429 uint32_t iBit;
2430 if (idMsr <= UINT32_C(0x00001fff))
2431 {
2432 offMsr = 0;
2433 iBit = idMsr;
2434 }
2435 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2436 {
2437 offMsr = 0x400;
2438 iBit = idMsr - UINT32_C(0xc0000000);
2439 }
2440 else
2441 {
2442 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2443 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2444 }
2445
2446 /*
2447 * Get the MSR read permissions.
2448 */
2449 uint32_t fRet;
2450 uint32_t const offMsrRead = offBitmapRead + offMsr;
2451 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2452 if (ASMBitTest(pbMsrBitmap, (offMsrRead << 3) + iBit))
2453 fRet = VMXMSRPM_EXIT_RD;
2454 else
2455 fRet = VMXMSRPM_ALLOW_RD;
2456
2457 /*
2458 * Get the MSR write permissions.
2459 */
2460 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2461 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2462 if (ASMBitTest(pbMsrBitmap, (offMsrWrite << 3) + iBit))
2463 fRet |= VMXMSRPM_EXIT_WR;
2464 else
2465 fRet |= VMXMSRPM_ALLOW_WR;
2466
2467 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2468 return fRet;
2469}
2470
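/*
 * Editor's note: illustrative sketch only, not part of the original sources.
 * It condenses the bitmap layout described above into the offset/bit arithmetic
 * for one MSR; the names are made up, the constants match the layout table.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   // Computes the byte offset and bit number of the read permission bit for
 *   // an MSR; returns false for MSRs outside the two covered ranges (those
 *   // always cause a VM-exit). Add 0x800 to the byte offset for the write bit.
 *   static bool exampleVmxMsrReadBit(uint32_t idMsr, uint32_t *poffByte, uint32_t *piBit)
 *   {
 *       uint32_t offMsr;
 *       uint32_t iBit;
 *       if (idMsr <= UINT32_C(0x00001fff))
 *       {
 *           offMsr = 0x000;                         // low MSR read bits
 *           iBit   = idMsr;
 *       }
 *       else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
 *       {
 *           offMsr = 0x400;                         // high MSR read bits
 *           iBit   = idMsr - UINT32_C(0xc0000000);
 *       }
 *       else
 *           return false;
 *       *poffByte = offMsr + (iBit >> 3);           // e.g. MSR 0xc0000080 (EFER) -> byte 0x410, bit 0
 *       *piBit    = iBit & 7;
 *       return true;
 *   }
 * @endcode
 */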
2471
2472/**
2473 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2474 * to see if it causes a VM-exit.
2475 *
2476 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2477 * @param pbIoBitmap Pointer to I/O bitmap.
2478 * @param uPort The I/O port being accessed.
2479 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2480 */
2481static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
2482{
2483 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2484
2485 /*
2486 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2487 * VM-exit.
2488 *
2489 * Reading 1, 2 or 4 bytes at ports 0xffff, 0xfffe and 0xfffc respectively is
2490 * valid and does not constitute a wrap around. However, reading 2 bytes at
2491 * port 0xffff, or 4 bytes at ports 0xffff/0xfffe/0xfffd, does. In other words,
2492 * any access that touches -both- port 0xffff and port 0 is a wrap around.
2493 *
2494 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2495 */
2496 uint32_t const uPortLast = uPort + cbAccess;
2497 if (uPortLast > 0x10000)
2498 return true;
2499
2500 /*
2501 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2502 */
2503 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2504 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2505 Assert(idxPermBit < 8);
2506 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2507 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2508
2509 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2510 RTUINT16U uPerm;
2511 uPerm.s.Lo = pbIoBitmap[offPerm];
2512 if (idxPermBit + cbAccess > 8)
2513 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
2514 else
2515 uPerm.s.Hi = 0;
2516
2517 /* If any bit for the access is 1, we must cause a VM-exit. */
2518 if (uPerm.u & fMask)
2519 return true;
2520
2521 return false;
2522}
2523
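/*
 * Editor's note: illustrative example only, not part of the original sources.
 * Two worked cases for the wrap-around rule above: a 1-byte access to port
 * 0xffff ends at byte 0xffff (uPortLast = 0x10000, not greater than 0x10000),
 * so only the bitmap decides; a 2-byte access to port 0xffff reaches byte
 * 0x10000 (uPortLast = 0x10001), wraps around and must always exit.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   // uPort=0xffff, cbAccess=1: 0x10000 is not > 0x10000 -> consult the bitmap.
 *   // uPort=0xffff, cbAccess=2: 0x10001 is     > 0x10000 -> unconditional VM-exit.
 *   static bool exampleIoAccessWraps(uint16_t uPort, uint8_t cbAccess)
 *   {
 *       return (uint32_t)uPort + cbAccess > UINT32_C(0x10000);
 *   }
 * @endcode
 */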
2524
2525/**
2526 * Returns whether the given VMCS field is valid and supported for the guest.
2527 *
2528 * @param pVM The cross context VM structure.
2529 * @param u64VmcsField The VMCS field.
2530 *
2531 * @remarks This takes into account the CPU features exposed to the guest.
2532 */
2533VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2534{
2535 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2536 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2537 if (!uFieldEncHi)
2538 { /* likely */ }
2539 else
2540 return false;
2541
2542 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2543 switch (uFieldEncLo)
2544 {
2545 /*
2546 * 16-bit fields.
2547 */
2548 /* Control fields. */
2549 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2550 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2551 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2552
2553 /* Guest-state fields. */
2554 case VMX_VMCS16_GUEST_ES_SEL:
2555 case VMX_VMCS16_GUEST_CS_SEL:
2556 case VMX_VMCS16_GUEST_SS_SEL:
2557 case VMX_VMCS16_GUEST_DS_SEL:
2558 case VMX_VMCS16_GUEST_FS_SEL:
2559 case VMX_VMCS16_GUEST_GS_SEL:
2560 case VMX_VMCS16_GUEST_LDTR_SEL:
2561 case VMX_VMCS16_GUEST_TR_SEL: return true;
2562 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2563 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2564
2565 /* Host-state fields. */
2566 case VMX_VMCS16_HOST_ES_SEL:
2567 case VMX_VMCS16_HOST_CS_SEL:
2568 case VMX_VMCS16_HOST_SS_SEL:
2569 case VMX_VMCS16_HOST_DS_SEL:
2570 case VMX_VMCS16_HOST_FS_SEL:
2571 case VMX_VMCS16_HOST_GS_SEL:
2572 case VMX_VMCS16_HOST_TR_SEL: return true;
2573
2574 /*
2575 * 64-bit fields.
2576 */
2577 /* Control fields. */
2578 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2579 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2580 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2581 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2582 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2583 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2584 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2585 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2586 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2587 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2588 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2589 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2590 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2591 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2592 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2593 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2594 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2595 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2596 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2597 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2598 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2599 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2600 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2601 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2602 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2603 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2604 case VMX_VMCS64_CTRL_EPTP_FULL:
2605 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2606 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2607 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2608 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2609 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2610 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2611 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2612 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2613 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2614 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2615 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2616 {
2617 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2618 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2619 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2620 }
2621 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2622 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2623 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2624 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2625 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2626 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2627 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2628 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2629 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2630 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2631 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2632 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
2633
2634 /* Read-only data fields. */
2635 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2636 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2637
2638 /* Guest-state fields. */
2639 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2640 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2641 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2642 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2643 case VMX_VMCS64_GUEST_PAT_FULL:
2644 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2645 case VMX_VMCS64_GUEST_EFER_FULL:
2646 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2647 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2648 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2649 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2650 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2651 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2652 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2653 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2654 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2655
2656 /* Host-state fields. */
2657 case VMX_VMCS64_HOST_PAT_FULL:
2658 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2659 case VMX_VMCS64_HOST_EFER_FULL:
2660 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2661
2662 /*
2663 * 32-bit fields.
2664 */
2665 /* Control fields. */
2666 case VMX_VMCS32_CTRL_PIN_EXEC:
2667 case VMX_VMCS32_CTRL_PROC_EXEC:
2668 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2669 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2670 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2671 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2672 case VMX_VMCS32_CTRL_EXIT:
2673 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2674 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2675 case VMX_VMCS32_CTRL_ENTRY:
2676 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2677 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2678 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2679 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2680 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2681 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2682 case VMX_VMCS32_CTRL_PLE_GAP:
2683 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2684
2685 /* Read-only data fields. */
2686 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2687 case VMX_VMCS32_RO_EXIT_REASON:
2688 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2689 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2690 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2691 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2692 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2693 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2694
2695 /* Guest-state fields. */
2696 case VMX_VMCS32_GUEST_ES_LIMIT:
2697 case VMX_VMCS32_GUEST_CS_LIMIT:
2698 case VMX_VMCS32_GUEST_SS_LIMIT:
2699 case VMX_VMCS32_GUEST_DS_LIMIT:
2700 case VMX_VMCS32_GUEST_FS_LIMIT:
2701 case VMX_VMCS32_GUEST_GS_LIMIT:
2702 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2703 case VMX_VMCS32_GUEST_TR_LIMIT:
2704 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2705 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2706 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2707 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2708 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2709 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2710 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2711 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2712 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2713 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2714 case VMX_VMCS32_GUEST_INT_STATE:
2715 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2716 case VMX_VMCS32_GUEST_SMBASE:
2717 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2718 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2719
2720 /* Host-state fields. */
2721 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2722
2723 /*
2724 * Natural-width fields.
2725 */
2726 /* Control fields. */
2727 case VMX_VMCS_CTRL_CR0_MASK:
2728 case VMX_VMCS_CTRL_CR4_MASK:
2729 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2730 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2731 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2732 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2733 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2734 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2735
2736 /* Read-only data fields. */
2737 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2738 case VMX_VMCS_RO_IO_RCX:
2739 case VMX_VMCS_RO_IO_RSI:
2740 case VMX_VMCS_RO_IO_RDI:
2741 case VMX_VMCS_RO_IO_RIP:
2742 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2743
2744 /* Guest-state fields. */
2745 case VMX_VMCS_GUEST_CR0:
2746 case VMX_VMCS_GUEST_CR3:
2747 case VMX_VMCS_GUEST_CR4:
2748 case VMX_VMCS_GUEST_ES_BASE:
2749 case VMX_VMCS_GUEST_CS_BASE:
2750 case VMX_VMCS_GUEST_SS_BASE:
2751 case VMX_VMCS_GUEST_DS_BASE:
2752 case VMX_VMCS_GUEST_FS_BASE:
2753 case VMX_VMCS_GUEST_GS_BASE:
2754 case VMX_VMCS_GUEST_LDTR_BASE:
2755 case VMX_VMCS_GUEST_TR_BASE:
2756 case VMX_VMCS_GUEST_GDTR_BASE:
2757 case VMX_VMCS_GUEST_IDTR_BASE:
2758 case VMX_VMCS_GUEST_DR7:
2759 case VMX_VMCS_GUEST_RSP:
2760 case VMX_VMCS_GUEST_RIP:
2761 case VMX_VMCS_GUEST_RFLAGS:
2762 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2763 case VMX_VMCS_GUEST_SYSENTER_ESP:
2764 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2765
2766 /* Host-state fields. */
2767 case VMX_VMCS_HOST_CR0:
2768 case VMX_VMCS_HOST_CR3:
2769 case VMX_VMCS_HOST_CR4:
2770 case VMX_VMCS_HOST_FS_BASE:
2771 case VMX_VMCS_HOST_GS_BASE:
2772 case VMX_VMCS_HOST_TR_BASE:
2773 case VMX_VMCS_HOST_GDTR_BASE:
2774 case VMX_VMCS_HOST_IDTR_BASE:
2775 case VMX_VMCS_HOST_SYSENTER_ESP:
2776 case VMX_VMCS_HOST_SYSENTER_EIP:
2777 case VMX_VMCS_HOST_RSP:
2778 case VMX_VMCS_HOST_RIP: return true;
2779 }
2780
2781 return false;
2782}
2783
2784
2785/**
2786 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2787 *
2788 * @returns @c true if it causes a VM-exit, @c false otherwise.
2789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2790 * @param u16Port The I/O port being accessed.
2791 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2792 */
2793VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2794{
2795 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2796 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2797 return true;
2798
2799 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2800 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
2801
2802 return false;
2803}
2804
2805
2806/**
2807 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2808 *
2809 * @returns @c true if it causes a VM-exit, @c false otherwise.
2810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2811 * @param uNewCr3 The CR3 value being written.
2812 */
2813VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2814{
2815 /*
2816 * If the CR3-load exiting control is set and the new CR3 value does not
2817 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2818 *
2819 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2820 */
2821 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
2822 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2823 {
2824 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
2825 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2826
2827 /* If the CR3-target count is 0, cause a VM-exit. */
2828 if (uCr3TargetCount == 0)
2829 return true;
2830
2831 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2832 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2833 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2834 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2835 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2836 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
2837 return true;
2838 }
2839 return false;
2840}
2841
2842
2843/**
2844 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2845 * VM-exit or not.
2846 *
2847 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2848 * @param pVCpu The cross context virtual CPU structure.
2849 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2850 * VMX_EXIT_VMWRITE).
2851 * @param u64VmcsField The VMCS field.
2852 */
2853VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2854{
2855 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2856 Assert( uExitReason == VMX_EXIT_VMREAD
2857 || uExitReason == VMX_EXIT_VMWRITE);
2858
2859 /*
2860 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2861 */
2862 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2863 return true;
2864
2865 /*
2866 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2867 * is intercepted. This excludes any reserved bits in the valid parts of the field
2868 * encoding (i.e. bit 12).
2869 */
2870 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2871 return true;
2872
2873 /*
2874 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2875 */
2876 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2877 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2878 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2879 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
2880 Assert(pbBitmap);
2881 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2882 return ASMBitTest(pbBitmap, (u32VmcsField << 3) + (u32VmcsField & 7));
2883}
2884
2885
2886
2887/**
2888 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2889 *
2890 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2891 * @param u16Port The IO port being accessed.
2892 * @param enmIoType The type of IO access.
2893 * @param cbReg The IO operand size in bytes.
2894 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
2895 * @param iEffSeg The effective segment number.
2896 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2897 * @param fStrIo Whether this is a string IO instruction.
2898 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2899 * Optional, can be NULL.
2900 */
2901VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2902 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2903 PSVMIOIOEXITINFO pIoExitInfo)
2904{
2905 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2906 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2907
2908 /*
2909 * The IOPM layout:
2910 * Each bit represents one 8-bit port, ports 0..65535, giving 65536 bits or
2911 * two 4K pages in total.
2912 *
2913 * For IO instructions that access more than a single byte, the permission bits
2914 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2915 *
2916 * Since it's possible to do a 32-bit IO access at port 65535 (accessing 4 bytes),
2917 * we need 3 extra bits beyond the second 4K page.
2918 */
2919 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2920
2921 uint16_t const offIopm = u16Port >> 3;
2922 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2923 uint8_t const cShift = u16Port - (offIopm << 3);
2924 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2925
2926 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2927 Assert(pbIopm);
2928 pbIopm += offIopm;
2929 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2930 if (u16Iopm & fIopmMask)
2931 {
2932 if (pIoExitInfo)
2933 {
2934 static const uint32_t s_auIoOpSize[] =
2935 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2936
2937 static const uint32_t s_auIoAddrSize[] =
2938 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2939
2940 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2941 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2942 pIoExitInfo->n.u1Str = fStrIo;
2943 pIoExitInfo->n.u1Rep = fRep;
2944 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2945 pIoExitInfo->n.u1Type = enmIoType;
2946 pIoExitInfo->n.u16Port = u16Port;
2947 }
2948 return true;
2949 }
2950
2951 /** @todo remove later (for debugging as VirtualBox always traps all IO
2952 * intercepts). */
2953 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
2954 return false;
2955}
2956
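/*
 * Editor's note: worked example only, not part of the original sources.
 * For the IOPM lookup above, a 2-byte access to port 0x03f9 gives
 * offIopm = 0x03f9 >> 3 = 0x7f, cShift = 1 and a mask covering bits 1 and 2 of
 * the 16-bit value read at that byte offset; if either bit is set the access is
 * intercepted. The helper below (made-up name) reproduces just the mask math.
 * @code
 *   #include <stdint.h>
 *
 *   static uint16_t exampleSvmIopmMask(uint16_t u16Port, uint16_t fSizeMask) // fSizeMask: 1, 3 or 0xf
 *   {
 *       uint16_t const offIopm = u16Port >> 3;
 *       uint8_t  const cShift  = (uint8_t)(u16Port - (offIopm << 3));          // same as u16Port & 7
 *       return (uint16_t)((1 << cShift) | (fSizeMask << cShift));              // bits tested in the IOPM
 *   }
 * @endcode
 */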
2957
2958/**
2959 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2960 *
2961 * @returns VBox status code.
2962 * @param idMsr The MSR being requested.
2963 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2964 * bitmap for @a idMsr.
2965 * @param puMsrpmBit Where to store the bit offset starting at the byte
2966 * returned in @a pbOffMsrpm.
2967 */
2968VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2969{
2970 Assert(pbOffMsrpm);
2971 Assert(puMsrpmBit);
2972
2973 /*
2974 * MSRPM Layout:
2975 * Byte offset MSR range
2976 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2977 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2978 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2979 * 0x1800 - 0x1fff Reserved
2980 *
2981 * Each MSR is represented by 2 permission bits (read and write).
2982 */
2983 if (idMsr <= 0x00001fff)
2984 {
2985 /* Pentium-compatible MSRs. */
2986 uint32_t const bitoffMsr = idMsr << 1;
2987 *pbOffMsrpm = bitoffMsr >> 3;
2988 *puMsrpmBit = bitoffMsr & 7;
2989 return VINF_SUCCESS;
2990 }
2991
2992 if ( idMsr >= 0xc0000000
2993 && idMsr <= 0xc0001fff)
2994 {
2995 /* AMD Sixth Generation x86 Processor MSRs. */
2996 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2997 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2998 *puMsrpmBit = bitoffMsr & 7;
2999 return VINF_SUCCESS;
3000 }
3001
3002 if ( idMsr >= 0xc0010000
3003 && idMsr <= 0xc0011fff)
3004 {
3005 /* AMD Seventh and Eighth Generation Processor MSRs. */
3006 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3007 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3008 *puMsrpmBit = bitoffMsr & 7;
3009 return VINF_SUCCESS;
3010 }
3011
3012 *pbOffMsrpm = 0;
3013 *puMsrpmBit = 0;
3014 return VERR_OUT_OF_RANGE;
3015}
3016
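/*
 * Editor's note: worked example only, not part of the original sources.
 * MSR 0xc0000080 (EFER) lies in the second MSRPM range, so
 * bitoffMsr = (0xc0000080 - 0xc0000000) * 2 = 0x100, giving byte offset
 * 0x800 + (0x100 >> 3) = 0x820 with the read bit at bit 0 and the write bit at
 * bit 1 of that byte.
 * @code
 *   uint16_t offMsrpm;
 *   uint8_t  uMsrpmBit;
 *   int rc = CPUMGetSvmMsrpmOffsetAndBit(0xc0000080, &offMsrpm, &uMsrpmBit); // EFER
 *   // On success: offMsrpm == 0x820 and uMsrpmBit == 0; the read permission is
 *   // bit uMsrpmBit and the write permission is bit uMsrpmBit + 1 of that byte.
 * @endcode
 */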
3017
3018/**
3019 * Checks whether the guest is in VMX non-root mode and using EPT paging.
3020 *
3021 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3022 * @param pVCpu The cross context virtual CPU structure.
3023 */
3024VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
3025{
3026 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
3027}
3028
3029
3030/**
3031 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
3032 * nested-guest is in PAE mode.
3033 *
3034 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3035 * @param pVCpu The cross context virtual CPU structure.
3036 */
3037VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3038{
3039 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3040 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3041}
3042
3043
3044/**
3045 * Returns the guest-physical address of the APIC-access page when executing a
3046 * nested-guest.
3047 *
3048 * @returns The APIC-access page guest-physical address.
3049 * @param pVCpu The cross context virtual CPU structure.
3050 */
3051VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3052{
3053 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3054}
3055