1/* $Id: CPUMAllRegs.cpp 95351 2022-06-23 06:32:40Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/apic.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/hm.h>
31#include "CPUMInternal.h"
32#include <VBox/vmm/vmcc.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <VBox/vmm/hm.h>
37#include <VBox/vmm/tm.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
41# include <iprt/asm-amd64-x86.h>
42#endif
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/**
59 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
60 *
61 * @returns Pointer to the Virtual CPU.
62 * @param a_pGuestCtx Pointer to the guest context.
63 */
64#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
65
66/**
67 * Lazily loads the hidden parts of a selector register when using raw-mode.
68 */
69#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
70 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
71
72/** @def CPUM_INT_ASSERT_NOT_EXTRN
73 * Macro for asserting that the state denoted by @a a_fNotExtrn is present.
74 *
75 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
76 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
77 */
78#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
79 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
80 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
81
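/* [Editorial addition - not part of the original file] A minimal sketch of how
   CPUM_INT_ASSERT_NOT_EXTRN is meant to be used: the fExtrn bits mark guest
   register state that still lives in the hardware VMCS/VMCB and has not yet
   been imported into CPUMCTX, so getters assert the relevant bits are clear
   before reading. The helper name below is hypothetical. */
#if 0 /* illustrative sketch only */
static uint64_t cpumExampleReadEferChecked(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER); /* EFER must already be imported. */
    return pVCpu->cpum.s.Guest.msrEFER;
}
#endif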
82
83VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
84{
85 pVCpu->cpum.s.Hyper.cr3 = cr3;
86}
87
88VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
89{
90 return pVCpu->cpum.s.Hyper.cr3;
91}
92
93
94/** @def MAYBE_LOAD_DRx
95 * Macro for updating DRx values in raw-mode and ring-0 contexts.
96 */
97#ifdef IN_RING0
98# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
99#else
100# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
101#endif
102
103VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
104{
105 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
106 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
107}
108
109
110VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
111{
112 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
113 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
114}
115
116
117VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
118{
119 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
120 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
121}
122
123
124VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
125{
126 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
127 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
128}
129
130
131VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
132{
133 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
134}
135
136
137VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
138{
139 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
140}
141
142
143VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
144{
145 return pVCpu->cpum.s.Hyper.dr[0];
146}
147
148
149VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
150{
151 return pVCpu->cpum.s.Hyper.dr[1];
152}
153
154
155VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
156{
157 return pVCpu->cpum.s.Hyper.dr[2];
158}
159
160
161VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
162{
163 return pVCpu->cpum.s.Hyper.dr[3];
164}
165
166
167VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
168{
169 return pVCpu->cpum.s.Hyper.dr[6];
170}
171
172
173VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
174{
175 return pVCpu->cpum.s.Hyper.dr[7];
176}
177
178
179/**
180 * Gets the pointer to the internal CPUMCTXCORE structure.
181 * This is only for reading in order to save a few calls.
182 *
183 * @param pVCpu The cross context virtual CPU structure.
184 */
185VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
186{
187 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
188}
189
190
191/**
192 * Queries the pointer to the internal CPUMCTX structure.
193 *
194 * @returns The CPUMCTX pointer.
195 * @param pVCpu The cross context virtual CPU structure.
196 */
197VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
198{
199 return &pVCpu->cpum.s.Guest;
200}
201
202
203/**
204 * Queries the pointer to the internal CPUMCTXMSRS structure.
205 *
206 * This is for NEM only.
207 *
208 * @returns The CPUMCTX pointer.
209 * @param pVCpu The cross context virtual CPU structure.
210 */
211VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
212{
213 return &pVCpu->cpum.s.GuestMsrs;
214}
215
216
217VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
218{
219 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
220 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
221 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
222 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
223 return VINF_SUCCESS; /* formality, consider it void. */
224}
225
226
227VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
228{
229 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
230 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
231 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
232 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
233 return VINF_SUCCESS; /* formality, consider it void. */
234}
235
236
237VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
238{
239 pVCpu->cpum.s.Guest.tr.Sel = tr;
240 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
241 return VINF_SUCCESS; /* formality, consider it void. */
242}
243
244
245VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
246{
247 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
248 /* The caller will set more hidden bits if it has them. */
249 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
250 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
251 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
252 return VINF_SUCCESS; /* formality, consider it void. */
253}
254
255
256/**
257 * Set the guest CR0.
258 *
259 * When called in GC, the hyper CR0 may be updated if that is
260 * required. The caller only has to take special action if AM,
261 * WP, PG or PE changes.
262 *
263 * @returns VINF_SUCCESS (consider it void).
264 * @param pVCpu The cross context virtual CPU structure.
265 * @param cr0 The new CR0 value.
266 */
267VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
268{
269 /*
270 * Check for changes causing TLB flushes (for REM).
271 * The caller is responsible for calling PGM when appropriate.
272 */
273 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
274 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
275 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
276 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
277
278 /*
279 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
280 */
281 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
282 PGMCr0WpEnabled(pVCpu);
283
284 /* The ET flag is settable on a 386 and hardwired on 486+. */
285 if ( !(cr0 & X86_CR0_ET)
286 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
287 cr0 |= X86_CR0_ET;
288
289 pVCpu->cpum.s.Guest.cr0 = cr0;
290 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
291 return VINF_SUCCESS;
292}
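/* [Editorial addition - not part of the original file] Hedged usage sketch of
   CPUMSetGuestCR0 from an instruction-emulation point of view. The helper name
   is hypothetical, and the PGM notification is only indicated by a comment
   because the exact PGM API is outside the scope of this file. */
#if 0 /* illustrative sketch only */
static void cpumExampleEmulateMovToCr0(PVMCPUCC pVCpu, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, uNewCr0);   /* Updates fChanged and clears CPUMCTX_EXTRN_CR0. */
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP))
    {
        /* Per the function docs above, the caller must take special action
           (e.g. inform PGM) when AM, WP, PG or PE change. */
    }
}
#endif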
293
294
295VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
296{
297 pVCpu->cpum.s.Guest.cr2 = cr2;
298 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
299 return VINF_SUCCESS;
300}
301
302
303VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
304{
305 pVCpu->cpum.s.Guest.cr3 = cr3;
306 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
307 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
308 return VINF_SUCCESS;
309}
310
311
312VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
313{
314 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
315
316 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
317 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
318 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
319
320 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
321 pVCpu->cpum.s.Guest.cr4 = cr4;
322 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
323 return VINF_SUCCESS;
324}
325
326
327VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
328{
329 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
330 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
331 return VINF_SUCCESS;
332}
333
334
335VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
336{
337 pVCpu->cpum.s.Guest.eip = eip;
338 return VINF_SUCCESS;
339}
340
341
342VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
343{
344 pVCpu->cpum.s.Guest.eax = eax;
345 return VINF_SUCCESS;
346}
347
348
349VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
350{
351 pVCpu->cpum.s.Guest.ebx = ebx;
352 return VINF_SUCCESS;
353}
354
355
356VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
357{
358 pVCpu->cpum.s.Guest.ecx = ecx;
359 return VINF_SUCCESS;
360}
361
362
363VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
364{
365 pVCpu->cpum.s.Guest.edx = edx;
366 return VINF_SUCCESS;
367}
368
369
370VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
371{
372 pVCpu->cpum.s.Guest.esp = esp;
373 return VINF_SUCCESS;
374}
375
376
377VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
378{
379 pVCpu->cpum.s.Guest.ebp = ebp;
380 return VINF_SUCCESS;
381}
382
383
384VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
385{
386 pVCpu->cpum.s.Guest.esi = esi;
387 return VINF_SUCCESS;
388}
389
390
391VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
392{
393 pVCpu->cpum.s.Guest.edi = edi;
394 return VINF_SUCCESS;
395}
396
397
398VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
399{
400 pVCpu->cpum.s.Guest.ss.Sel = ss;
401 return VINF_SUCCESS;
402}
403
404
405VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
406{
407 pVCpu->cpum.s.Guest.cs.Sel = cs;
408 return VINF_SUCCESS;
409}
410
411
412VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
413{
414 pVCpu->cpum.s.Guest.ds.Sel = ds;
415 return VINF_SUCCESS;
416}
417
418
419VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
420{
421 pVCpu->cpum.s.Guest.es.Sel = es;
422 return VINF_SUCCESS;
423}
424
425
426VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
427{
428 pVCpu->cpum.s.Guest.fs.Sel = fs;
429 return VINF_SUCCESS;
430}
431
432
433VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
434{
435 pVCpu->cpum.s.Guest.gs.Sel = gs;
436 return VINF_SUCCESS;
437}
438
439
440VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
441{
442 pVCpu->cpum.s.Guest.msrEFER = val;
443 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
444}
445
446
447VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
448{
449 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
450 if (pcbLimit)
451 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
452 return pVCpu->cpum.s.Guest.idtr.pIdt;
453}
454
455
456VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
457{
458 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
459 if (pHidden)
460 *pHidden = pVCpu->cpum.s.Guest.tr;
461 return pVCpu->cpum.s.Guest.tr.Sel;
462}
463
464
465VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
466{
467 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
468 return pVCpu->cpum.s.Guest.cs.Sel;
469}
470
471
472VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
473{
474 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
475 return pVCpu->cpum.s.Guest.ds.Sel;
476}
477
478
479VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
480{
481 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
482 return pVCpu->cpum.s.Guest.es.Sel;
483}
484
485
486VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
487{
488 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
489 return pVCpu->cpum.s.Guest.fs.Sel;
490}
491
492
493VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
494{
495 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
496 return pVCpu->cpum.s.Guest.gs.Sel;
497}
498
499
500VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
501{
502 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
503 return pVCpu->cpum.s.Guest.ss.Sel;
504}
505
506
507VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
508{
509 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
510 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
511 if ( !CPUMIsGuestInLongMode(pVCpu)
512 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
513 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
514 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
515}
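/* [Editorial addition - not part of the original file] Worked example of the
   flat-PC arithmetic above, using hypothetical real-mode values: with the
   hidden CS base loaded the real-mode way (selector << 4), CS=0xF000 and
   IP=0xFFF0 give 0xF0000 + 0xFFF0 = 0xFFFF0. */
#if 0 /* illustrative sketch only */
static uint32_t cpumExampleRealModeFlatPc(uint16_t uCs, uint16_t uIp)
{
    /* Same arithmetic CPUMGetGuestFlatPC() performs, but with an explicit
       real-mode CS base instead of the hidden cs.u64Base field. */
    return ((uint32_t)uCs << 4) + uIp;
}
#endif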
516
517
518VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
519{
520 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
521 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
522 if ( !CPUMIsGuestInLongMode(pVCpu)
523 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
524 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
525 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
526}
527
528
529VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
530{
531 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
532 return pVCpu->cpum.s.Guest.ldtr.Sel;
533}
534
535
536VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
537{
538 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
539 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
540 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
541 return pVCpu->cpum.s.Guest.ldtr.Sel;
542}
543
544
545VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
546{
547 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
548 return pVCpu->cpum.s.Guest.cr0;
549}
550
551
552VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
553{
554 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
555 return pVCpu->cpum.s.Guest.cr2;
556}
557
558
559VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
560{
561 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
562 return pVCpu->cpum.s.Guest.cr3;
563}
564
565
566VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
567{
568 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
569 return pVCpu->cpum.s.Guest.cr4;
570}
571
572
573VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
574{
575 uint64_t u64;
576 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
577 if (RT_FAILURE(rc))
578 u64 = 0;
579 return u64;
580}
581
582
583VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
584{
585 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
586 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
587}
588
589
590VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
591{
592 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
593 return pVCpu->cpum.s.Guest.eip;
594}
595
596
597VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
598{
599 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
600 return pVCpu->cpum.s.Guest.rip;
601}
602
603
604VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
605{
606 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
607 return pVCpu->cpum.s.Guest.eax;
608}
609
610
611VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
612{
613 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
614 return pVCpu->cpum.s.Guest.ebx;
615}
616
617
618VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
619{
620 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
621 return pVCpu->cpum.s.Guest.ecx;
622}
623
624
625VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
626{
627 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
628 return pVCpu->cpum.s.Guest.edx;
629}
630
631
632VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
633{
634 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
635 return pVCpu->cpum.s.Guest.esi;
636}
637
638
639VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
640{
641 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
642 return pVCpu->cpum.s.Guest.edi;
643}
644
645
646VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
647{
648 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
649 return pVCpu->cpum.s.Guest.esp;
650}
651
652
653VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
654{
655 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
656 return pVCpu->cpum.s.Guest.ebp;
657}
658
659
660VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
661{
662 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
663 return pVCpu->cpum.s.Guest.eflags.u32;
664}
665
666
667VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
668{
669 switch (iReg)
670 {
671 case DISCREG_CR0:
672 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
673 *pValue = pVCpu->cpum.s.Guest.cr0;
674 break;
675
676 case DISCREG_CR2:
677 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
678 *pValue = pVCpu->cpum.s.Guest.cr2;
679 break;
680
681 case DISCREG_CR3:
682 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
683 *pValue = pVCpu->cpum.s.Guest.cr3;
684 break;
685
686 case DISCREG_CR4:
687 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
688 *pValue = pVCpu->cpum.s.Guest.cr4;
689 break;
690
691 case DISCREG_CR8:
692 {
693 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
694 uint8_t u8Tpr;
695 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
696 if (RT_FAILURE(rc))
697 {
698 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
699 *pValue = 0;
700 return rc;
701 }
702 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into cr8; bits 3-0 hold the sub-priority and are not represented in cr8. */
703 break;
704 }
705
706 default:
707 return VERR_INVALID_PARAMETER;
708 }
709 return VINF_SUCCESS;
710}
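/* [Editorial addition - not part of the original file] Hedged sketch of a
   generic control-register read through CPUMGetGuestCRx. The helper name is
   hypothetical; for valid register numbers only DISCREG_CR8 can fail (when no
   APIC instance exists). */
#if 0 /* illustrative sketch only */
static uint64_t cpumExampleReadCrx(PCVMCPUCC pVCpu, unsigned iCrReg)
{
    uint64_t uValue = 0;
    int rc = CPUMGetGuestCRx(pVCpu, iCrReg, &uValue);
    AssertMsg(RT_SUCCESS(rc) || iCrReg == DISCREG_CR8, ("rc=%Rrc iCrReg=%u\n", rc, iCrReg));
    return uValue;
}
#endif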
711
712
713VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
714{
715 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
716 return pVCpu->cpum.s.Guest.dr[0];
717}
718
719
720VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
721{
722 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
723 return pVCpu->cpum.s.Guest.dr[1];
724}
725
726
727VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
728{
729 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
730 return pVCpu->cpum.s.Guest.dr[2];
731}
732
733
734VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
735{
736 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
737 return pVCpu->cpum.s.Guest.dr[3];
738}
739
740
741VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
742{
743 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
744 return pVCpu->cpum.s.Guest.dr[6];
745}
746
747
748VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
749{
750 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
751 return pVCpu->cpum.s.Guest.dr[7];
752}
753
754
755VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
756{
757 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
758 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
759 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
760 if (iReg == 4 || iReg == 5)
761 iReg += 2;
762 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
763 return VINF_SUCCESS;
764}
765
766
767VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
768{
769 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
770 return pVCpu->cpum.s.Guest.msrEFER;
771}
772
773
774/**
775 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
776 *
777 * @returns Pointer to the leaf if found, NULL if not.
778 *
779 * @param pVM The cross context VM structure.
780 * @param uLeaf The leaf to get.
781 */
782PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
783{
784 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
785 if (iEnd)
786 {
787 unsigned iStart = 0;
788 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
789 for (;;)
790 {
791 unsigned i = iStart + (iEnd - iStart) / 2U;
792 if (uLeaf < paLeaves[i].uLeaf)
793 {
794 if (i <= iStart)
795 return NULL;
796 iEnd = i;
797 }
798 else if (uLeaf > paLeaves[i].uLeaf)
799 {
800 i += 1;
801 if (i >= iEnd)
802 return NULL;
803 iStart = i;
804 }
805 else
806 {
807 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
808 return &paLeaves[i];
809
810 /* This shouldn't normally happen. But in case it does due
811 to user configuration overrides or something, just return the
812 first sub-leaf. */
813 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
814 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
815 while ( paLeaves[i].uSubLeaf != 0
816 && i > 0
817 && uLeaf == paLeaves[i - 1].uLeaf)
818 i--;
819 return &paLeaves[i];
820 }
821 }
822 }
823
824 return NULL;
825}
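/* [Editorial addition - not part of the original file] The binary search above
   relies on GuestInfo.aCpuIdLeaves being sorted by uLeaf (and uSubLeaf). A
   hedged caller sketch; the helper name and the chosen leaf are illustrative. */
#if 0 /* illustrative sketch only */
static bool cpumExampleHasExtLeaf1(PVM pVM)
{
    /* NULL means the leaf is absent; this variant is only meant for leaves
       without sub-leaves (see the assertion in cpumCpuIdGetLeaf). */
    return cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)) != NULL;
}
#endif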
826
827
828/**
829 * Looks up a CPUID leaf in the CPUID leaf array.
830 *
831 * @returns Pointer to the leaf if found, NULL if not.
832 *
833 * @param pVM The cross context VM structure.
834 * @param uLeaf The leaf to get.
835 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
836 * isn't.
837 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
838 */
839PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
840{
841 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
842 if (iEnd)
843 {
844 unsigned iStart = 0;
845 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
846 for (;;)
847 {
848 unsigned i = iStart + (iEnd - iStart) / 2U;
849 if (uLeaf < paLeaves[i].uLeaf)
850 {
851 if (i <= iStart)
852 return NULL;
853 iEnd = i;
854 }
855 else if (uLeaf > paLeaves[i].uLeaf)
856 {
857 i += 1;
858 if (i >= iEnd)
859 return NULL;
860 iStart = i;
861 }
862 else
863 {
864 uSubLeaf &= paLeaves[i].fSubLeafMask;
865 if (uSubLeaf == paLeaves[i].uSubLeaf)
866 *pfExactSubLeafHit = true;
867 else
868 {
869 /* Find the right subleaf. We return the last one before
870 uSubLeaf if we don't find an exact match. */
871 if (uSubLeaf < paLeaves[i].uSubLeaf)
872 while ( i > 0
873 && uLeaf == paLeaves[i - 1].uLeaf
874 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
875 i--;
876 else
877 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
878 && uLeaf == paLeaves[i + 1].uLeaf
879 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
880 i++;
881 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
882 }
883 return &paLeaves[i];
884 }
885 }
886 }
887
888 *pfExactSubLeafHit = false;
889 return NULL;
890}
891
892
893/**
894 * Gets a CPUID leaf.
895 *
896 * @param pVCpu The cross context virtual CPU structure.
897 * @param uLeaf The CPUID leaf to get.
898 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
899 * @param f64BitMode A tristate indicating whether the caller is in 64-bit mode or
900 * not: 1=true, 0=false, -1=whatever. This affects how the
901 * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
902 * Intel CPUs, where it's only returned in 64-bit mode.
903 * @param pEax Where to store the EAX value.
904 * @param pEbx Where to store the EBX value.
905 * @param pEcx Where to store the ECX value.
906 * @param pEdx Where to store the EDX value.
907 */
908VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
909 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
910{
911 bool fExactSubLeafHit;
912 PVM pVM = pVCpu->CTX_SUFF(pVM);
913 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
914 if (pLeaf)
915 {
916 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
917 if (fExactSubLeafHit)
918 {
919 *pEax = pLeaf->uEax;
920 *pEbx = pLeaf->uEbx;
921 *pEcx = pLeaf->uEcx;
922 *pEdx = pLeaf->uEdx;
923
924 /*
925 * Deal with CPU specific information.
926 */
927 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
928 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
929 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
930 {
931 if (uLeaf == 1)
932 {
933 /* EBX: Bits 31-24: Initial APIC ID. */
934 Assert(pVCpu->idCpu <= 255);
935 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
936 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
937
938 /* EDX: Bit 9: AND with APICBASE.EN. */
939 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
940 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
941
942 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
943 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
944 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
945 }
946 else if (uLeaf == 0xb)
947 {
948 /* EDX: Initial extended APIC ID. */
949 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
950 *pEdx = pVCpu->idCpu;
951 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
952 }
953 else if (uLeaf == UINT32_C(0x8000001e))
954 {
955 /* EAX: Initial extended APIC ID. */
956 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
957 *pEax = pVCpu->idCpu;
958 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
959 }
960 else if (uLeaf == UINT32_C(0x80000001))
961 {
962 /* EDX: Bit 9: AND with APICBASE.EN. */
963 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
964 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
965 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
966 }
967 else
968 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
969 }
970
971 /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
972 if ( uLeaf == UINT32_C(0x80000001)
973 && f64BitMode == false
974 && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
975 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
976 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
977 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
978 *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
979
980 }
981 /*
982 * Out-of-range sub-leaves aren't easy or pretty to emulate, but we do
983 * the best we can here...
984 */
985 else
986 {
987 *pEax = *pEbx = *pEcx = *pEdx = 0;
988 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
989 {
990 *pEcx = uSubLeaf & 0xff;
991 *pEdx = pVCpu->idCpu;
992 }
993 }
994 }
995 else
996 {
997 /*
998 * Different CPUs have different ways of dealing with unknown CPUID leaves.
999 */
1000 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1001 {
1002 default:
1003 AssertFailed();
1004 RT_FALL_THRU();
1005 case CPUMUNKNOWNCPUID_DEFAULTS:
1006 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1007 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1008 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1009 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1010 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1011 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1012 break;
1013 case CPUMUNKNOWNCPUID_PASSTHRU:
1014 *pEax = uLeaf;
1015 *pEbx = 0;
1016 *pEcx = uSubLeaf;
1017 *pEdx = 0;
1018 break;
1019 }
1020 }
1021 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1022}
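/* [Editorial addition - not part of the original file] Hedged usage sketch for
   CPUMGetGuestCpuId: query standard leaf 1 and test the APIC feature bit. The
   helper name is hypothetical; -1 is passed for the f64BitMode tristate since
   the SYSCALL special case only concerns leaf 0x80000001. */
#if 0 /* illustrative sketch only */
static bool cpumExampleGuestReportsApic(PVMCPUCC pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1 /*uLeaf*/, 0 /*uSubLeaf*/, -1 /*f64BitMode: don't care*/,
                      &uEax, &uEbx, &uEcx, &uEdx);
    return RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_APIC);
}
#endif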
1023
1024
1025/**
1026 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1027 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1028 *
1029 * @returns Previous value.
1030 * @param pVCpu The cross context virtual CPU structure to make the
1031 * change on. Usually the calling EMT.
1032 * @param fVisible Whether to make it visible (true) or hide it (false).
1033 *
1034 * @remarks This is "VMMDECL" so that it still links with
1035 * the old APIC code which is in VBoxDD2 and not in
1036 * the VMM module.
1037 */
1038VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1039{
1040 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1041 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1042 return fOld;
1043}
1044
1045
1046/**
1047 * Gets the host CPU vendor.
1048 *
1049 * @returns CPU vendor.
1050 * @param pVM The cross context VM structure.
1051 */
1052VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1053{
1054 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1055}
1056
1057
1058/**
1059 * Gets the host CPU microarchitecture.
1060 *
1061 * @returns CPU microarchitecture.
1062 * @param pVM The cross context VM structure.
1063 */
1064VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1065{
1066 return pVM->cpum.s.HostFeatures.enmMicroarch;
1067}
1068
1069
1070/**
1071 * Gets the guest CPU vendor.
1072 *
1073 * @returns CPU vendor.
1074 * @param pVM The cross context VM structure.
1075 */
1076VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1077{
1078 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1079}
1080
1081
1082/**
1083 * Gets the guest CPU microarchitecture.
1084 *
1085 * @returns CPU microarchitecture.
1086 * @param pVM The cross context VM structure.
1087 */
1088VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1089{
1090 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1091}
1092
1093
1094/**
1095 * Gets the maximum number of physical and linear address bits supported by the
1096 * guest.
1097 *
1098 * @param pVM The cross context VM structure.
1099 * @param pcPhysAddrWidth Where to store the physical address width.
1100 * @param pcLinearAddrWidth Where to store the linear address width.
1101 */
1102VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1103{
1104 AssertPtr(pVM);
1105 AssertReturnVoid(pcPhysAddrWidth);
1106 AssertReturnVoid(pcLinearAddrWidth);
1107 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1108 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1109}
1110
1111
1112VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1113{
1114 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1115 return CPUMRecalcHyperDRx(pVCpu, 0);
1116}
1117
1118
1119VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1120{
1121 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1122 return CPUMRecalcHyperDRx(pVCpu, 1);
1123}
1124
1125
1126VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1127{
1128 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1129 return CPUMRecalcHyperDRx(pVCpu, 2);
1130}
1131
1132
1133VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1134{
1135 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1136 return CPUMRecalcHyperDRx(pVCpu, 3);
1137}
1138
1139
1140VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1141{
1142 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1143 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1144 return VINF_SUCCESS; /* No need to recalc. */
1145}
1146
1147
1148VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1149{
1150 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1151 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1152 return CPUMRecalcHyperDRx(pVCpu, 7);
1153}
1154
1155
1156VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1157{
1158 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1159 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1160 if (iReg == 4 || iReg == 5)
1161 iReg += 2;
1162 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1163 return CPUMRecalcHyperDRx(pVCpu, iReg);
1164}
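/* [Editorial addition - not part of the original file] Small sketch of the
   DR4/DR6 and DR5/DR7 aliasing handled by CPUMGetGuestDRx/CPUMSetGuestDRx.
   The helper name is hypothetical. */
#if 0 /* illustrative sketch only */
static void cpumExampleDr5Alias(PVMCPUCC pVCpu)
{
    uint64_t uDr5 = 0;
    int rc = CPUMGetGuestDRx(pVCpu, 5 /*iReg*/, &uDr5);  /* Reads dr[7] due to the alias. */
    AssertRC(rc);
    Assert(uDr5 == CPUMGetGuestDR7(pVCpu));
}
#endif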
1165
1166
1167/**
1168 * Recalculates the hypervisor DRx register values based on current guest
1169 * registers and DBGF breakpoints, updating changed registers depending on the
1170 * context.
1171 *
1172 * This is called whenever a guest DRx register is modified (any context) and
1173 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1174 *
1175 * In raw-mode context this function will reload any (hyper) DRx registers that
1176 * come out with a different value. It may also have to save the host debug
1177 * registers if that hasn't been done already. In this context though, we'll
1178 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1179 * are only important when breakpoints are actually enabled.
1180 *
1181 * In ring-0 (HM) context DR0-3 will be loaded by us, while DR7 will be
1182 * reloaded by the HM code if it changes. Furthermore, we will only use the
1183 * combined register set when the VBox debugger is actually using hardware BPs;
1184 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1185 * concern us here).
1186 *
1187 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1188 * all the time.
1189 *
1190 * @returns VINF_SUCCESS.
1191 * @param pVCpu The cross context virtual CPU structure.
1192 * @param iGstReg The guest debug register number that was modified.
1193 * UINT8_MAX if not a guest register.
1194 */
1195VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
1196{
1197 PVM pVM = pVCpu->CTX_SUFF(pVM);
1198#ifndef IN_RING0
1199 RT_NOREF_PV(iGstReg);
1200#endif
1201
1202 /*
1203 * Compare the DR7s first.
1204 *
1205 * We only care about the enabled flags. GD is virtualized when we
1206 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1207 * always have the LE and GE bits set, so no need to check and disable
1208 * stuff if they're cleared like we have to for the guest DR7.
1209 */
1210 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1211 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1212 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1213 uGstDr7 = 0;
1214 else if (!(uGstDr7 & X86_DR7_LE))
1215 uGstDr7 &= ~X86_DR7_LE_ALL;
1216 else if (!(uGstDr7 & X86_DR7_GE))
1217 uGstDr7 &= ~X86_DR7_GE_ALL;
1218
1219 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1220 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1221 {
1222 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1223
1224 /*
1225 * Ok, something is enabled. Recalc each of the breakpoints, taking
1226 * the VM debugger ones over the guest ones. In raw-mode context we will
1227 * not allow breakpoints with values inside the hypervisor area.
1228 */
1229 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1230
1231 /* bp 0 */
1232 RTGCUINTREG uNewDr0;
1233 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1234 {
1235 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1236 uNewDr0 = DBGFBpGetDR0(pVM);
1237 }
1238 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1239 {
1240 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1241 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1242 }
1243 else
1244 uNewDr0 = 0;
1245
1246 /* bp 1 */
1247 RTGCUINTREG uNewDr1;
1248 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1249 {
1250 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1251 uNewDr1 = DBGFBpGetDR1(pVM);
1252 }
1253 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1254 {
1255 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1256 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1257 }
1258 else
1259 uNewDr1 = 0;
1260
1261 /* bp 2 */
1262 RTGCUINTREG uNewDr2;
1263 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1264 {
1265 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1266 uNewDr2 = DBGFBpGetDR2(pVM);
1267 }
1268 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1269 {
1270 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1271 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1272 }
1273 else
1274 uNewDr2 = 0;
1275
1276 /* bp 3 */
1277 RTGCUINTREG uNewDr3;
1278 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1279 {
1280 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1281 uNewDr3 = DBGFBpGetDR3(pVM);
1282 }
1283 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1284 {
1285 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1286 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1287 }
1288 else
1289 uNewDr3 = 0;
1290
1291 /*
1292 * Apply the updates.
1293 */
1294 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1295 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1296 CPUMSetHyperDR3(pVCpu, uNewDr3);
1297 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1298 CPUMSetHyperDR2(pVCpu, uNewDr2);
1299 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1300 CPUMSetHyperDR1(pVCpu, uNewDr1);
1301 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1302 CPUMSetHyperDR0(pVCpu, uNewDr0);
1303 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1304 CPUMSetHyperDR7(pVCpu, uNewDr7);
1305 }
1306#ifdef IN_RING0
1307 else if (CPUMIsGuestDebugStateActive(pVCpu))
1308 {
1309 /*
1310 * Reload the register that was modified. Normally this won't happen
1311 * as we won't intercept DRx writes when not having the hyper debug
1312 * state loaded, but in case we do for some reason we'll simply deal
1313 * with it.
1314 */
1315 switch (iGstReg)
1316 {
1317 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1318 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1319 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1320 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1321 default:
1322 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1323 }
1324 }
1325#endif
1326 else
1327 {
1328 /*
1329 * No active debug state any more. In raw-mode this means we have to
1330 * make sure DR7 has everything disabled now, if we armed it already.
1331 * In ring-0 we might end up here when just single stepping.
1332 */
1333#ifdef IN_RING0
1334 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1335 {
1336 if (pVCpu->cpum.s.Hyper.dr[0])
1337 ASMSetDR0(0);
1338 if (pVCpu->cpum.s.Hyper.dr[1])
1339 ASMSetDR1(0);
1340 if (pVCpu->cpum.s.Hyper.dr[2])
1341 ASMSetDR2(0);
1342 if (pVCpu->cpum.s.Hyper.dr[3])
1343 ASMSetDR3(0);
1344 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1345 }
1346#endif
1347 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1348
1349 /* Clear all the registers. */
1350 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1351 pVCpu->cpum.s.Hyper.dr[3] = 0;
1352 pVCpu->cpum.s.Hyper.dr[2] = 0;
1353 pVCpu->cpum.s.Hyper.dr[1] = 0;
1354 pVCpu->cpum.s.Hyper.dr[0] = 0;
1355
1356 }
1357 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1358 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1359 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1360 pVCpu->cpum.s.Hyper.dr[7]));
1361
1362 return VINF_SUCCESS;
1363}
1364
1365
1366/**
1367 * Set the guest XCR0 register.
1368 *
1369 * Will load additional state if the FPU state is already loaded (in ring-0 &
1370 * raw-mode context).
1371 *
1372 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1373 * value.
1374 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1375 * @param uNewValue The new value.
1376 * @thread EMT(pVCpu)
1377 */
1378VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1379{
1380 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1381 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1382 /* The X87 bit cannot be cleared. */
1383 && (uNewValue & XSAVE_C_X87)
1384 /* AVX requires SSE. */
1385 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1386 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1387 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1388 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1389 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1390 )
1391 {
1392 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1393
1394 /* If more state components are enabled, we need to take care to load
1395 them if the FPU/SSE state is already loaded. May otherwise leak
1396 host state to the guest. */
1397 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1398 if (fNewComponents)
1399 {
1400#ifdef IN_RING0
1401 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1402 {
1403 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1404 /* Adding more components. */
1405 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
1406 else
1407 {
1408 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1409 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1410 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1411 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1412 }
1413 }
1414#endif
1415 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1416 }
1417 return VINF_SUCCESS;
1418 }
1419 return VERR_CPUM_RAISE_GP_0;
1420}
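/* [Editorial addition - not part of the original file] Hedged sketch of the
   XCR0 validation rules enforced above, assuming the VM's fXStateGuestMask
   permits the AVX (YMM) component; the helper name is hypothetical. */
#if 0 /* illustrative sketch only */
static void cpumExampleXcr0Rules(PVMCPUCC pVCpu)
{
    int rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);
    Assert(rc == VINF_SUCCESS);          /* Accepted (if the guest XSAVE mask allows YMM). */
    rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);
    Assert(rc == VERR_CPUM_RAISE_GP_0);  /* Rejected: AVX requires SSE. */
    rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE);
    Assert(rc == VERR_CPUM_RAISE_GP_0);  /* Rejected: the X87 bit cannot be cleared. */
    RT_NOREF_PV(rc);
}
#endif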
1421
1422
1423/**
1424 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1425 *
1426 * @returns true if NXE is enabled, otherwise false.
1427 * @param pVCpu The cross context virtual CPU structure.
1428 */
1429VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1430{
1431 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1432 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1433}
1434
1435
1436/**
1437 * Tests if the guest has the Page Size Extension enabled (PSE).
1438 *
1439 * @returns true if PSE or PAE is enabled, otherwise false.
1440 * @param pVCpu The cross context virtual CPU structure.
1441 */
1442VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1443{
1444 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1445 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1446 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1447}
1448
1449
1450/**
1451 * Tests if the guest has paging enabled (PG).
1452 *
1453 * @returns true if paging is enabled, otherwise false.
1454 * @param pVCpu The cross context virtual CPU structure.
1455 */
1456VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1457{
1458 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1459 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1460}
1461
1462
1463/**
1464 * Tests if the guest has ring-0 write protection enabled (WP).
1465 *
1466 * @returns true if CR0.WP is set, otherwise false.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 */
1469VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1470{
1471 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1472 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1473}
1474
1475
1476/**
1477 * Tests if the guest is running in real mode or not.
1478 *
1479 * @returns true if in real mode, otherwise false.
1480 * @param pVCpu The cross context virtual CPU structure.
1481 */
1482VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1483{
1484 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1485 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1486}
1487
1488
1489/**
1490 * Tests if the guest is running in real or virtual 8086 mode.
1491 *
1492 * @returns @c true if it is, @c false if not.
1493 * @param pVCpu The cross context virtual CPU structure.
1494 */
1495VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1496{
1497 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1498 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1499 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1500}
1501
1502
1503/**
1504 * Tests if the guest is running in protected mode or not.
1505 *
1506 * @returns true if in protected mode, otherwise false.
1507 * @param pVCpu The cross context virtual CPU structure.
1508 */
1509VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1510{
1511 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1512 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1513}
1514
1515
1516/**
1517 * Tests if the guest is running in paged protected mode or not.
1518 *
1519 * @returns true if in paged protected mode, otherwise false.
1520 * @param pVCpu The cross context virtual CPU structure.
1521 */
1522VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1523{
1524 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1525 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1526}
1527
1528
1529/**
1530 * Tests if the guest is running in long mode or not.
1531 *
1532 * @returns true if in long mode, otherwise false.
1533 * @param pVCpu The cross context virtual CPU structure.
1534 */
1535VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1536{
1537 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1538 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1539}
1540
1541
1542/**
1543 * Tests if the guest is running in PAE mode or not.
1544 *
1545 * @returns true if in PAE mode, otherwise false.
1546 * @param pVCpu The cross context virtual CPU structure.
1547 */
1548VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1549{
1550 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1551 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1552 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1553 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1554 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1555 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1556}
1557
1558
1559/**
1560 * Tests if the guest is running in 64-bit mode or not.
1561 *
1562 * @returns true if in 64-bit mode, otherwise false.
1563 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1564 */
1565VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1566{
1567 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1568 if (!CPUMIsGuestInLongMode(pVCpu))
1569 return false;
1570 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1571 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1572}
1573
1574
1575/**
1576 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1577 * registers.
1578 *
1579 * @returns true if in 64-bit mode, otherwise false.
1580 * @param pCtx Pointer to the current guest CPU context.
1581 */
1582VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1583{
1584 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1585}
1586
1587
1588/**
1589 * Sets the specified changed flags (CPUM_CHANGED_*).
1590 *
1591 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1592 * @param fChangedAdd The changed flags to add.
1593 */
1594VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1595{
1596 pVCpu->cpum.s.fChanged |= fChangedAdd;
1597}
1598
1599
1600/**
1601 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1602 *
1603 * @returns true if supported.
1604 * @returns false if not supported.
1605 * @param pVM The cross context VM structure.
1606 */
1607VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1608{
1609 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1610}
1611
1612
1613/**
1614 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1615 * @returns true if used.
1616 * @returns false if not used.
1617 * @param pVM The cross context VM structure.
1618 */
1619VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1620{
1621 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1622}
1623
1624
1625/**
1626 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1627 * @returns true if used.
1628 * @returns false if not used.
1629 * @param pVM The cross context VM structure.
1630 */
1631VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1632{
1633 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1634}
1635
1636
1637/**
1638 * Checks if we activated the FPU/XMM state of the guest OS.
1639 *
1640 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1641 * the next time we'll be executing guest code, so it may return true for
1642 * 64-on-32 when we still haven't actually loaded the FPU state, just scheduled
1643 * it to be loaded the next time we go through the world switcher
1644 * (CPUM_SYNC_FPU_STATE).
1645 *
1646 * @returns true / false.
1647 * @param pVCpu The cross context virtual CPU structure.
1648 */
1649VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1650{
1651 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1652 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1653 return fRet;
1654}
1655
1656
1657/**
1658 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1659 *
1660 * @returns true / false.
1661 * @param pVCpu The cross context virtual CPU structure.
1662 */
1663VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1664{
1665 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1666 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1667 return fRet;
1668}
1669
1670
1671/**
1672 * Checks if we saved the FPU/XMM state of the host OS.
1673 *
1674 * @returns true / false.
1675 * @param pVCpu The cross context virtual CPU structure.
1676 */
1677VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1678{
1679 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1680}
1681
1682
1683/**
1684 * Checks if the guest debug state is active.
1685 *
1686 * @returns boolean
1687 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1688 */
1689VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1690{
1691 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1692}
1693
1694
1695/**
1696 * Checks if the hyper debug state is active.
1697 *
1698 * @returns boolean
1699 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1700 */
1701VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1702{
1703 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1704}
1705
1706
1707/**
1708 * Mark the guest's debug state as inactive.
1709 *
1710 * @returns boolean
1711 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1712 * @todo This API doesn't make sense any more.
1713 */
1714VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1715{
1716 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1717 NOREF(pVCpu);
1718}
1719
1720
1721/**
1722 * Get the current privilege level of the guest.
1723 *
1724 * @returns CPL
1725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1726 */
1727VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1728{
1729 /*
1730 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1731 *
1732 * Note! We used to check CS.DPL here, assuming it was always equal to
1733 * CPL even if a conforming segment was loaded. But this turned out to
1734 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1735 * during install after a far call to ring 2 with VT-x. Then on newer
1736 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1737 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1738 *
1739 * So, forget CS.DPL, always use SS.DPL.
1740 *
1741 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1742 * isn't necessarily equal if the segment is conforming.
1743 * See section 4.11.1 in the AMD manual.
1744 *
1745 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1746 * right after real->prot mode switch and when in V8086 mode? That
1747 * section says the RPL specified in a direct transfer (call, jmp,
1748 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1749 * it would be impossible for an exception handler or the iret
1750 * instruction to figure out whether SS:ESP are part of the frame
1751 * or not. A VBox or qemu bug must've led to this misconception.
1752 *
1753 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1754 * selector into SS with an RPL other than the CPL when CPL != 3 and
1755 * we're in 64-bit mode. The Intel dev box doesn't allow this; only
1756 * RPL = CPL is accepted. Weird.
1757 */
1758 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1759 uint32_t uCpl;
1760 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1761 {
1762 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1763 {
1764 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1765 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1766 else
1767 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1768 }
1769 else
1770 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1771 }
1772 else
1773 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1774 return uCpl;
1775}
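/* [Editorial addition - not part of the original file] Summary sketch of the
   CPL rules implemented above; the helper name is hypothetical. */
#if 0 /* illustrative sketch only */
static void cpumExampleCplExpectations(PVMCPU pVCpu)
{
    uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
    /* Real mode (CR0.PE clear)      -> 0
       Virtual-8086 mode (EFLAGS.VM) -> 3
       Protected/long mode           -> SS.DPL (or SS.RPL when the hidden
                                        selector parts are not valid). */
    Assert(uCpl <= 3);
    NOREF(uCpl);
}
#endif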
1776
1777
1778/**
1779 * Gets the current guest CPU mode.
1780 *
1781 * If paging mode is what you need, check out PGMGetGuestMode().
1782 *
1783 * @returns The CPU mode.
1784 * @param pVCpu The cross context virtual CPU structure.
1785 */
1786VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1787{
1788 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1789 CPUMMODE enmMode;
1790 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1791 enmMode = CPUMMODE_REAL;
1792 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1793 enmMode = CPUMMODE_PROTECTED;
1794 else
1795 enmMode = CPUMMODE_LONG;
1796
1797 return enmMode;
1798}
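/* [Editorial addition - not part of the original file] Hedged mapping of
   CPUMMODE values to names, e.g. for logging. The helper name is hypothetical
   and only the enum values used above are handled explicitly. */
#if 0 /* illustrative sketch only */
static const char *cpumExampleModeName(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      return "real";
        case CPUMMODE_PROTECTED: return "protected";
        case CPUMMODE_LONG:      return "long";
        default:                 return "unknown";
    }
}
#endif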
1799
1800
1801/**
1802 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1803 *
1804 * @returns 16, 32 or 64.
1805 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1806 */
1807VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1808{
1809 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1810
1811 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1812 return 16;
1813
1814 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1815 {
1816 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1817 return 16;
1818 }
1819
1820 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1821 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1822 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1823 return 64;
1824
1825 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1826 return 32;
1827
1828 return 16;
1829}
1830
1831
1832VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1833{
1834 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1835
1836 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1837 return DISCPUMODE_16BIT;
1838
1839 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1840 {
1841 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1842 return DISCPUMODE_16BIT;
1843 }
1844
1845 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1846 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1847 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1848 return DISCPUMODE_64BIT;
1849
1850 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1851 return DISCPUMODE_32BIT;
1852
1853 return DISCPUMODE_16BIT;
1854}
1855
1856
1857/**
1858 * Gets the guest MXCSR_MASK value.
1859 *
1860 * This does not access the x87 state, but the value we determined at VM
1861 * initialization.
1862 *
1863 * @returns MXCSR mask.
1864 * @param pVM The cross context VM structure.
1865 */
1866VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1867{
1868 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1869}
1870
1871
1872/**
1873 * Returns whether the guest has physical interrupts enabled.
1874 *
1875 * @returns @c true if interrupts are enabled, @c false otherwise.
1876 * @param pVCpu The cross context virtual CPU structure.
1877 *
1878 * @remarks Warning! This function does -not- take into account the global-interrupt
1879 * flag (GIF).
1880 */
1881VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1882{
1883 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1884 {
1885 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
1886 return RT_BOOL(fEFlags & X86_EFL_IF);
1887 }
1888
1889 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
1890 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
1891
1892 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
1893 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1894}
1895
1896
1897/**
1898 * Returns whether the nested-guest has virtual interrupts enabled.
1899 *
1900 * @returns @c true if interrupts are enabled, @c false otherwise.
1901 * @param pVCpu The cross context virtual CPU structure.
1902 *
1903 * @remarks Warning! This function does -not- take into account the global-interrupt
1904 * flag (GIF).
1905 */
1906VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1907{
1908 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1909 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
1910
1911 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1912 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
1913
1914 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1915 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
1916}
1917
1918
1919/**
1920 * Calculates the interruptibility of the guest.
1921 *
1922 * @returns Interruptibility level.
1923 * @param pVCpu The cross context virtual CPU structure.
1924 */
1925VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1926{
1927#if 1
1928 /* Global-interrupt flag blocks pretty much everything we care about here. */
1929 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1930 {
1931 /*
1932 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1933 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1934 * or raw-mode). Hence we use the function below which handles the details.
1935 */
1936 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
1937 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1938 {
1939 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1940 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1941 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1942
1943 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1944 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1945 }
1946
1947 /*
1948 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1949 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1950 * However, there is some uncertainty regarding the converse, i.e. whether
1951 * NMI-blocking until IRET blocks delivery of physical interrupts.
1952 *
1953 * See Intel spec. 25.4.1 "Event Blocking".
1954 */
1955 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1956 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1957
1958 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1959 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1960
1961 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1962 }
1963 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1964#else
1965 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1966 {
1967 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1968 {
1969 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1970 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1971
1972 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1973 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1974 {
1975 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1976 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1977 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1978 }
1979 AssertFailed();
1980 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1981 }
1982 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1983 }
1984 else
1985 {
1986 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1987 {
1988 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1989 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1990 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1991 }
1992 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1993 }
1994#endif
1995}
1996
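/*
 * Illustrative sketch, not part of the original source: how a caller might act on the
 * interruptibility level returned by CPUMGetGuestInterruptibility() before trying to
 * deliver an external interrupt. The helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static bool cpumExampleCanInjectExtInt(PVMCPU pVCpu)
{
    /* Only the unrestrained state guarantees that nothing blocks an external interrupt. */
    return CPUMGetGuestInterruptibility(pVCpu) == CPUMINTERRUPTIBILITY_UNRESTRAINED;
}
#endif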
1997
1998/**
1999 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
2000 *
2001 * @returns @c true if NMIs are blocked, @c false otherwise.
2002 * @param pVCpu The cross context virtual CPU structure.
2003 */
2004VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2005{
2006 /*
2007 * Return the state of guest-NMI blocking in any of the following cases:
2008 * - We're not executing a nested-guest.
2009 * - We're executing an SVM nested-guest[1].
2010 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2011 *
2012 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2013 * SVM hypervisors must track NMI blocking themselves by intercepting
2014 * the IRET instruction after injection of an NMI.
2015 */
2016 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2017 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2018 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2019 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2020 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2021
2022 /*
2023 * Return the state of virtual-NMI blocking, if we are executing a
2024 * VMX nested-guest with virtual-NMIs enabled.
2025 */
2026 return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
2027}
2028
2029
2030/**
2031 * Sets blocking delivery of NMIs to the guest.
2032 *
2033 * @param pVCpu The cross context virtual CPU structure.
2034 * @param fBlock Whether NMIs are blocked or not.
2035 */
2036VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2037{
2038 /*
2039 * Set the state of guest-NMI blocking in any of the following cases:
2040 * - We're not executing a nested-guest.
2041 * - We're executing an SVM nested-guest[1].
2042 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2043 *
2044 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2045 * SVM hypervisors must track NMI blocking themselves by intercepting
2046 * the IRET instruction after injection of an NMI.
2047 */
2048 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2049 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2050 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2051 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2052 {
2053 if (fBlock)
2054 {
2055 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2056 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2057 }
2058 else
2059 {
2060 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2061 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2062 }
2063 return;
2064 }
2065
2066 /*
2067 * Set the state of virtual-NMI blocking, if we are executing a
2068 * VMX nested-guest with virtual-NMIs enabled.
2069 */
2070 return CPUMSetGuestVmxVirtNmiBlocking(pCtx, fBlock);
2071}
2072
2073
2074/**
2075 * Checks whether the SVM nested-guest has physical interrupts enabled.
2076 *
2077 * @returns true if interrupts are enabled, false otherwise.
2078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2079 * @param pCtx The guest-CPU context.
2080 *
2081 * @remarks This does -not- take into account the global-interrupt flag.
2082 */
2083VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2084{
2085 /** @todo Optimization: Avoid this function call and use a pointer to the
2086 * relevant eflags instead (setup during VMRUN instruction emulation). */
2087 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2088
2089 X86EFLAGS fEFlags;
2090 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2091 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2092 else
2093 fEFlags.u = pCtx->eflags.u;
2094
2095 return fEFlags.Bits.u1IF;
2096}
2097
2098
2099/**
2100 * Checks whether the SVM nested-guest is in a state to receive virtual interrupts
2101 * (set up for injection by the VMRUN instruction).
2102 *
2103 * @returns @c true if the nested-guest is ready to receive virtual interrupts,
2104 * @c false otherwise.
2105 *
2106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2107 * @param pCtx The guest-CPU context.
2108 */
2109VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2110{
2111 RT_NOREF(pVCpu);
2112 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2113
2114 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
2115 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2116 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2117 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2118 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2119 return false;
2120
2121 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2122}
2123
2124
2125/**
2126 * Gets the pending SVM nested-guest interrupt vector.
2127 *
2128 * @returns The nested-guest interrupt to inject.
2129 * @param pCtx The guest-CPU context.
2130 */
2131VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2132{
2133 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
2134}
2135
2136
2137/**
2138 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2139 *
2140 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2141 * @param pCtx The guest-CPU context.
2142 */
2143VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2144{
2145 /*
2146 * Reload the guest's "host state".
2147 */
2148 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2149 pCtx->es = pHostState->es;
2150 pCtx->cs = pHostState->cs;
2151 pCtx->ss = pHostState->ss;
2152 pCtx->ds = pHostState->ds;
2153 pCtx->gdtr = pHostState->gdtr;
2154 pCtx->idtr = pHostState->idtr;
2155 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2156 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2157 pCtx->cr3 = pHostState->uCr3;
2158 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2159 pCtx->rflags = pHostState->rflags;
2160 pCtx->rflags.Bits.u1VM = 0;
2161 pCtx->rip = pHostState->uRip;
2162 pCtx->rsp = pHostState->uRsp;
2163 pCtx->rax = pHostState->uRax;
2164 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2165 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2166 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2167
2168 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2169 * raise \#GP(0) in the guest. */
2170
2171 /** @todo check the loaded host-state for consistency. Figure out what
2172 * exactly this involves? */
2173}
2174
2175
2176/**
2177 * Saves the host-state to the host-state save area as part of a VMRUN.
2178 *
2179 * @param pCtx The guest-CPU context.
2180 * @param cbInstr The length of the VMRUN instruction in bytes.
2181 */
2182VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2183{
2184 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2185 pHostState->es = pCtx->es;
2186 pHostState->cs = pCtx->cs;
2187 pHostState->ss = pCtx->ss;
2188 pHostState->ds = pCtx->ds;
2189 pHostState->gdtr = pCtx->gdtr;
2190 pHostState->idtr = pCtx->idtr;
2191 pHostState->uEferMsr = pCtx->msrEFER;
2192 pHostState->uCr0 = pCtx->cr0;
2193 pHostState->uCr3 = pCtx->cr3;
2194 pHostState->uCr4 = pCtx->cr4;
2195 pHostState->rflags = pCtx->rflags;
2196 pHostState->uRip = pCtx->rip + cbInstr;
2197 pHostState->uRsp = pCtx->rsp;
2198 pHostState->uRax = pCtx->rax;
2199}
2200
2201
2202/**
2203 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2204 * nested-guest.
2205 *
2206 * @returns The TSC offset after applying any nested-guest TSC offset.
2207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2208 * @param uTscValue The guest TSC.
2209 *
2210 * @sa CPUMRemoveNestedGuestTscOffset.
2211 */
2212VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2213{
2214 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2215 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2216 {
2217 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2218 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2219 return uTscValue;
2220 }
2221
2222 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2223 {
2224 uint64_t offTsc;
2225 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2226 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2227 return uTscValue + offTsc;
2228 }
2229 return uTscValue;
2230}
2231
2232
2233/**
2234 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2235 * guest.
2236 *
2237 * @returns The TSC offset after removing any nested-guest TSC offset.
2238 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2239 * @param uTscValue The nested-guest TSC.
2240 *
2241 * @sa CPUMApplyNestedGuestTscOffset.
2242 */
2243VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2244{
2245 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2246 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2247 {
2248 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2249 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2250 return uTscValue;
2251 }
2252
2253 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2254 {
2255 uint64_t offTsc;
2256 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2257 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2258 return uTscValue - offTsc;
2259 }
2260 return uTscValue;
2261}
2262
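/*
 * Illustrative sketch, not part of the original source: as long as the nested-guest TSC
 * offset does not change in between, CPUMRemoveNestedGuestTscOffset() undoes
 * CPUMApplyNestedGuestTscOffset(). The helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static void cpumExampleTscOffsetRoundTrip(PCVMCPU pVCpu, uint64_t uGuestTsc)
{
    uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
    uint64_t const uGuestTsc2 = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
    Assert(uGuestTsc2 == uGuestTsc);
}
#endif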
2263
2264/**
2265 * Used to dynamically import state residing in NEM or HM.
2266 *
2267 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2268 *
2269 * @returns VBox status code.
2270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2271 * @param fExtrnImport The fields to import.
2272 * @thread EMT(pVCpu)
2273 */
2274VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2275{
2276 VMCPU_ASSERT_EMT(pVCpu);
2277 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2278 {
2279 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2280 {
2281 case CPUMCTX_EXTRN_KEEPER_NEM:
2282 {
2283 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2284 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2285 return rc;
2286 }
2287
2288 case CPUMCTX_EXTRN_KEEPER_HM:
2289 {
2290#ifdef IN_RING0
2291 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2292 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2293 return rc;
2294#else
2295 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2296 return VINF_SUCCESS;
2297#endif
2298 }
2299 default:
2300 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2301 }
2302 }
2303 return VINF_SUCCESS;
2304}
2305
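/*
 * Illustrative sketch, not part of the original source: importing a specific register
 * on demand before reading it directly from the guest context. The helper name is
 * hypothetical; real callers typically use the CPUM_IMPORT_EXTRN_RET() macro instead.
 */
#if 0 /* example only, not compiled */
static int cpumExampleReadGuestCr3(PVMCPUCC pVCpu, uint64_t *puCr3)
{
    int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR3);
    if (RT_SUCCESS(rc))
        *puCr3 = pVCpu->cpum.s.Guest.cr3;
    return rc;
}
#endif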
2306
2307/**
2308 * Gets valid CR4 bits for the guest.
2309 *
2310 * @returns Valid CR4 bits.
2311 * @param pVM The cross context VM structure.
2312 */
2313VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2314{
2315 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2316 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2317 | X86_CR4_TSD | X86_CR4_DE
2318 | X86_CR4_MCE | X86_CR4_PCE;
2319 if (pGuestFeatures->fPae)
2320 fMask |= X86_CR4_PAE;
2321 if (pGuestFeatures->fPge)
2322 fMask |= X86_CR4_PGE;
2323 if (pGuestFeatures->fPse)
2324 fMask |= X86_CR4_PSE;
2325 if (pGuestFeatures->fFxSaveRstor)
2326 fMask |= X86_CR4_OSFXSR;
2327 if (pGuestFeatures->fVmx)
2328 fMask |= X86_CR4_VMXE;
2329 if (pGuestFeatures->fXSaveRstor)
2330 fMask |= X86_CR4_OSXSAVE;
2331 if (pGuestFeatures->fPcid)
2332 fMask |= X86_CR4_PCIDE;
2333 if (pGuestFeatures->fFsGsBase)
2334 fMask |= X86_CR4_FSGSBASE;
2335 if (pGuestFeatures->fSse)
2336 fMask |= X86_CR4_OSXMMEEXCPT;
2337 return fMask;
2338}
2339
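/*
 * Illustrative sketch, not part of the original source: using the mask above to check a
 * guest CR4 write for bits the guest CPU profile does not support. The helper name is
 * hypothetical.
 */
#if 0 /* example only, not compiled */
static bool cpumExampleIsCr4WriteValid(PVM pVM, uint64_t uNewCr4)
{
    /* Any bit outside the valid mask would have to raise #GP(0) on a real CPU. */
    return !(uNewCr4 & ~CPUMGetGuestCR4ValidMask(pVM));
}
#endif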
2340
2341/**
2342 * Sets the PAE PDPEs for the guest.
2343 *
2344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2345 * @param paPaePdpes The PAE PDPEs to set.
2346 */
2347VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2348{
2349 Assert(paPaePdpes);
2350 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2351 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2352 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2353}
2354
2355
2356/**
2357 * Gets the PAE PDPEs for the guest.
2358 *
2359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2360 * @param paPaePdpes Where to store the PAE PDPEs.
2361 */
2362VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2363{
2364 Assert(paPaePdpes);
2365 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2366 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2367 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2368}
2369
2370
2371/**
2372 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2373 *
2374 * @returns VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param uTimer The VMCS preemption timer value.
2377 * @param cShift The VMX-preemption timer shift (usually based on guest
2378 * VMX MSR rate).
2379 * @param pu64EntryTick Where to store the current tick when the timer is
2380 * programmed.
2381 * @thread EMT(pVCpu)
2382 */
2383VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2384{
2385 Assert(uTimer);
2386 Assert(cShift <= 31);
2387 Assert(pu64EntryTick);
2388 VMCPU_ASSERT_EMT(pVCpu);
2389 uint64_t const cTicksToNext = uTimer << cShift;
2390 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
2391}
2392
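/*
 * Illustrative sketch, not part of the original source: a worked example of the tick
 * calculation above. Assuming a VMX-preemption timer rate of 5 (bits 4:0 of
 * IA32_VMX_MISC, i.e. the timer decrements once every 2^5 = 32 TSC ticks) and a VMCS
 * timer value of 1000, the timer is armed 1000 << 5 = 32000 TSC ticks into the future.
 */
#if 0 /* example only, not compiled */
static uint64_t cpumExamplePreemptTimerTicks(void)
{
    uint32_t const uTimer = 1000;       /* VMCS preemption-timer value. */
    uint8_t  const cShift = 5;          /* Timer rate relative to the TSC. */
    return (uint64_t)uTimer << cShift;  /* = 32000 ticks. */
}
#endif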
2393
2394/**
2395 * Stops the VMX-preemption timer from firing.
2396 *
2397 * @returns VBox status code.
2398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2399 * @thread EMT.
2400 *
2401 * @remarks This can be called during VM reset, so we cannot assume it will be on
2402 * the EMT corresponding to @c pVCpu.
2403 */
2404VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2405{
2406 /*
2407 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
2408 * However, we still get called during CPUMR3Init() and hence we need to check if we have
2409 * a valid timer object before trying to stop it.
2410 */
2411 int rc;
2412 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2413 if (hTimer != NIL_TMTIMERHANDLE)
2414 {
2415 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2416 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2417 if (rc == VINF_SUCCESS)
2418 {
2419 if (TMTimerIsActive(pVM, hTimer))
2420 TMTimerStop(pVM, hTimer);
2421 TMTimerUnlock(pVM, hTimer);
2422 }
2423 }
2424 else
2425 rc = VERR_NOT_FOUND;
2426 return rc;
2427}
2428
2429
2430/**
2431 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2432 *
2433 * @returns VMXMSRPM_XXX - the MSR permission.
2434 * @param pvMsrBitmap Pointer to the MSR bitmap.
2435 * @param idMsr The MSR to get permissions for.
2436 *
2437 * @sa hmR0VmxSetMsrPermission.
2438 */
2439VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2440{
2441 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2442
2443 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2444
2445 /*
2446 * MSR Layout:
2447 * Byte index MSR range Interpreted as
2448 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2449 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2450 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2451 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2452 *
2453 * A bit corresponding to an MSR within the above range causes a VM-exit
2454 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls out of
2455 * the MSR range, it always causes a VM-exit.
2456 *
2457 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2458 */
2459 uint32_t const offBitmapRead = 0;
2460 uint32_t const offBitmapWrite = 0x800;
2461 uint32_t offMsr;
2462 uint32_t iBit;
2463 if (idMsr <= UINT32_C(0x00001fff))
2464 {
2465 offMsr = 0;
2466 iBit = idMsr;
2467 }
2468 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2469 {
2470 offMsr = 0x400;
2471 iBit = idMsr - UINT32_C(0xc0000000);
2472 }
2473 else
2474 {
2475 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2476 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2477 }
2478
2479 /*
2480 * Get the MSR read permissions.
2481 */
2482 uint32_t fRet;
2483 uint32_t const offMsrRead = offBitmapRead + offMsr;
2484 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2485 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
2486 fRet = VMXMSRPM_EXIT_RD;
2487 else
2488 fRet = VMXMSRPM_ALLOW_RD;
2489
2490 /*
2491 * Get the MSR write permissions.
2492 */
2493 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2494 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2495 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
2496 fRet |= VMXMSRPM_EXIT_WR;
2497 else
2498 fRet |= VMXMSRPM_ALLOW_WR;
2499
2500 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2501 return fRet;
2502}
2503
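/*
 * Illustrative sketch, not part of the original source: a worked example of the bitmap
 * lookup above for MSR_K6_EFER (0xc0000080), which lies in the "high" MSR range:
 *   offMsr = 0x400, iBit = 0x80
 *   read permission:  byte 0x400 + (0x80 >> 3) = 0x410, bit 0
 *   write permission: byte 0xc00 + (0x80 >> 3) = 0xc10, bit 0
 */
#if 0 /* example only, not compiled */
static uint32_t cpumExampleEferMsrPermission(void const *pvMsrBitmap)
{
    return CPUMGetVmxMsrPermission(pvMsrBitmap, MSR_K6_EFER);
}
#endif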
2504
2505/**
2506 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2507 * to see if it causes a VM-exit.
2508 *
2509 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2510 * @param pbIoBitmap Pointer to I/O bitmap.
2511 * @param uPort The I/O port being accessed.
2512 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2513 */
2514static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
2515{
2516 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2517
2518 /*
2519 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2520 * VM-exit.
2521 *
2522 * Reading 1, 2 or 4 bytes at ports 0xffff, 0xfffe and 0xfffc respectively is valid
2523 * and does not constitute a wrap around. However, reading 2 bytes at port 0xffff or
2524 * 4 bytes from port 0xffff/0xfffe/0xfffd constitutes a wrap around. In other words,
2525 * any access that touches -both- port 0xffff and port 0 is a wrap around.
2526 *
2527 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2528 */
2529 uint32_t const uPortLast = uPort + cbAccess;
2530 if (uPortLast > 0x10000)
2531 return true;
2532
2533 /*
2534 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2535 */
2536 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2537 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2538 Assert(idxPermBit < 8);
2539 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2540 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2541
2542 /* Fetch 8 or 16 bits depending on whether the access spans an 8-bit boundary. */
2543 RTUINT16U uPerm;
2544 uPerm.s.Lo = pbIoBitmap[offPerm];
2545 if (idxPermBit + cbAccess > 8)
2546 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
2547 else
2548 uPerm.s.Hi = 0;
2549
2550 /* If any bit for the access is 1, we must cause a VM-exit. */
2551 if (uPerm.u & fMask)
2552 return true;
2553
2554 return false;
2555}
2556
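/*
 * Illustrative sketch, not part of the original source: two worked examples of the
 * checks above. A 2-byte access at port 0xffff wraps around the 16-bit port space
 * (uPortLast = 0x10001) and is therefore always intercepted. A 4-byte access at port
 * 0x3fe covers ports 0x3fe..0x401: offPerm = 0x7f, idxPermBit = 6, fMask = 0xf << 6,
 * so the permission bits straddle bitmap bytes 0x7f and 0x80 and both are consulted.
 */
#if 0 /* example only, not compiled */
static void cpumExampleIoBitmapChecks(uint8_t const *pbIoBitmap)
{
    Assert(cpumGetVmxIoBitmapPermission(pbIoBitmap, 0xffff, 2));                /* Wrap-around: always a VM-exit. */
    bool const fIntercept = cpumGetVmxIoBitmapPermission(pbIoBitmap, 0x3fe, 4); /* Depends on bits 0x3fe..0x401. */
    RT_NOREF(fIntercept);
}
#endif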
2557
2558/**
2559 * Returns whether the given VMCS field is valid and supported for the guest.
2560 *
2561 * @param pVM The cross context VM structure.
2562 * @param u64VmcsField The VMCS field.
2563 *
2564 * @remarks This takes into account the CPU features exposed to the guest.
2565 */
2566VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2567{
2568 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2569 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2570 if (!uFieldEncHi)
2571 { /* likely */ }
2572 else
2573 return false;
2574
2575 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2576 switch (uFieldEncLo)
2577 {
2578 /*
2579 * 16-bit fields.
2580 */
2581 /* Control fields. */
2582 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2583 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2584 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2585
2586 /* Guest-state fields. */
2587 case VMX_VMCS16_GUEST_ES_SEL:
2588 case VMX_VMCS16_GUEST_CS_SEL:
2589 case VMX_VMCS16_GUEST_SS_SEL:
2590 case VMX_VMCS16_GUEST_DS_SEL:
2591 case VMX_VMCS16_GUEST_FS_SEL:
2592 case VMX_VMCS16_GUEST_GS_SEL:
2593 case VMX_VMCS16_GUEST_LDTR_SEL:
2594 case VMX_VMCS16_GUEST_TR_SEL: return true;
2595 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2596 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2597
2598 /* Host-state fields. */
2599 case VMX_VMCS16_HOST_ES_SEL:
2600 case VMX_VMCS16_HOST_CS_SEL:
2601 case VMX_VMCS16_HOST_SS_SEL:
2602 case VMX_VMCS16_HOST_DS_SEL:
2603 case VMX_VMCS16_HOST_FS_SEL:
2604 case VMX_VMCS16_HOST_GS_SEL:
2605 case VMX_VMCS16_HOST_TR_SEL: return true;
2606
2607 /*
2608 * 64-bit fields.
2609 */
2610 /* Control fields. */
2611 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2612 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2613 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2614 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2615 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2616 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2617 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2618 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2619 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2620 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2621 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2622 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2623 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2624 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2625 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2626 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2627 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2628 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2629 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2630 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2631 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2632 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2633 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2634 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2635 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2636 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2637 case VMX_VMCS64_CTRL_EPTP_FULL:
2638 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2639 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2640 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2641 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2642 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2643 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2644 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2645 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2646 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2647 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2648 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2649 {
2650 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2651 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2652 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2653 }
2654 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2655 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2656 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2657 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2658 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2659 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2660 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2661 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2662 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2663 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2664 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2665 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
2666
2667 /* Read-only data fields. */
2668 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2669 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2670
2671 /* Guest-state fields. */
2672 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2673 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2674 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2675 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2676 case VMX_VMCS64_GUEST_PAT_FULL:
2677 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2678 case VMX_VMCS64_GUEST_EFER_FULL:
2679 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2680 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2681 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2682 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2683 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2684 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2685 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2686 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2687 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2688
2689 /* Host-state fields. */
2690 case VMX_VMCS64_HOST_PAT_FULL:
2691 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2692 case VMX_VMCS64_HOST_EFER_FULL:
2693 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2694
2695 /*
2696 * 32-bit fields.
2697 */
2698 /* Control fields. */
2699 case VMX_VMCS32_CTRL_PIN_EXEC:
2700 case VMX_VMCS32_CTRL_PROC_EXEC:
2701 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2702 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2703 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2704 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2705 case VMX_VMCS32_CTRL_EXIT:
2706 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2707 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2708 case VMX_VMCS32_CTRL_ENTRY:
2709 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2710 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2711 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2712 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2713 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2714 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2715 case VMX_VMCS32_CTRL_PLE_GAP:
2716 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2717
2718 /* Read-only data fields. */
2719 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2720 case VMX_VMCS32_RO_EXIT_REASON:
2721 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2722 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2723 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2724 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2725 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2726 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2727
2728 /* Guest-state fields. */
2729 case VMX_VMCS32_GUEST_ES_LIMIT:
2730 case VMX_VMCS32_GUEST_CS_LIMIT:
2731 case VMX_VMCS32_GUEST_SS_LIMIT:
2732 case VMX_VMCS32_GUEST_DS_LIMIT:
2733 case VMX_VMCS32_GUEST_FS_LIMIT:
2734 case VMX_VMCS32_GUEST_GS_LIMIT:
2735 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2736 case VMX_VMCS32_GUEST_TR_LIMIT:
2737 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2738 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2739 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2740 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2741 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2742 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2743 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2744 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2745 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2746 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2747 case VMX_VMCS32_GUEST_INT_STATE:
2748 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2749 case VMX_VMCS32_GUEST_SMBASE:
2750 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2751 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2752
2753 /* Host-state fields. */
2754 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2755
2756 /*
2757 * Natural-width fields.
2758 */
2759 /* Control fields. */
2760 case VMX_VMCS_CTRL_CR0_MASK:
2761 case VMX_VMCS_CTRL_CR4_MASK:
2762 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2763 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2764 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2765 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2766 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2767 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2768
2769 /* Read-only data fields. */
2770 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2771 case VMX_VMCS_RO_IO_RCX:
2772 case VMX_VMCS_RO_IO_RSI:
2773 case VMX_VMCS_RO_IO_RDI:
2774 case VMX_VMCS_RO_IO_RIP:
2775 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2776
2777 /* Guest-state fields. */
2778 case VMX_VMCS_GUEST_CR0:
2779 case VMX_VMCS_GUEST_CR3:
2780 case VMX_VMCS_GUEST_CR4:
2781 case VMX_VMCS_GUEST_ES_BASE:
2782 case VMX_VMCS_GUEST_CS_BASE:
2783 case VMX_VMCS_GUEST_SS_BASE:
2784 case VMX_VMCS_GUEST_DS_BASE:
2785 case VMX_VMCS_GUEST_FS_BASE:
2786 case VMX_VMCS_GUEST_GS_BASE:
2787 case VMX_VMCS_GUEST_LDTR_BASE:
2788 case VMX_VMCS_GUEST_TR_BASE:
2789 case VMX_VMCS_GUEST_GDTR_BASE:
2790 case VMX_VMCS_GUEST_IDTR_BASE:
2791 case VMX_VMCS_GUEST_DR7:
2792 case VMX_VMCS_GUEST_RSP:
2793 case VMX_VMCS_GUEST_RIP:
2794 case VMX_VMCS_GUEST_RFLAGS:
2795 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2796 case VMX_VMCS_GUEST_SYSENTER_ESP:
2797 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2798
2799 /* Host-state fields. */
2800 case VMX_VMCS_HOST_CR0:
2801 case VMX_VMCS_HOST_CR3:
2802 case VMX_VMCS_HOST_CR4:
2803 case VMX_VMCS_HOST_FS_BASE:
2804 case VMX_VMCS_HOST_GS_BASE:
2805 case VMX_VMCS_HOST_TR_BASE:
2806 case VMX_VMCS_HOST_GDTR_BASE:
2807 case VMX_VMCS_HOST_IDTR_BASE:
2808 case VMX_VMCS_HOST_SYSENTER_ESP:
2809 case VMX_VMCS_HOST_SYSENTER_EIP:
2810 case VMX_VMCS_HOST_RSP:
2811 case VMX_VMCS_HOST_RIP: return true;
2812 }
2813
2814 return false;
2815}
2816
2817
2818/**
2819 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2820 *
2821 * @returns @c true if it causes a VM-exit, @c false otherwise.
2822 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2823 * @param u16Port The I/O port being accessed.
2824 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2825 */
2826VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2827{
2828 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2829 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2830 return true;
2831
2832 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2833 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
2834
2835 return false;
2836}
2837
2838
2839/**
2840 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2841 *
2842 * @returns @c true if it causes a VM-exit, @c false otherwise.
2843 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2844 * @param uNewCr3 The CR3 value being written.
2845 */
2846VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2847{
2848 /*
2849 * If the CR3-load exiting control is set and the new CR3 value does not
2850 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2851 *
2852 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2853 */
2854 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
2855 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2856 {
2857 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
2858 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2859
2860 /* If the CR3-target count is 0, cause a VM-exit. */
2861 if (uCr3TargetCount == 0)
2862 return true;
2863
2864 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2865 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2866 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2867 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2868 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2869 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
2870 return true;
2871 }
2872 return false;
2873}
2874
2875
2876/**
2877 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2878 * VM-exit or not.
2879 *
2880 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2881 * @param pVCpu The cross context virtual CPU structure.
2882 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2883 * VMX_EXIT_VMWRITE).
2884 * @param u64VmcsField The VMCS field.
2885 */
2886VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2887{
2888 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2889 Assert( uExitReason == VMX_EXIT_VMREAD
2890 || uExitReason == VMX_EXIT_VMWRITE);
2891
2892 /*
2893 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2894 */
2895 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2896 return true;
2897
2898 /*
2899 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2900 * is intercepted. This excludes any reserved bits in the valid parts of the field
2901 * encoding (i.e. bit 12).
2902 */
2903 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2904 return true;
2905
2906 /*
2907 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2908 */
2909 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2910 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2911 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2912 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
2913 Assert(pbBitmap);
2914 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2915 return ASMBitTest(&pbBitmap[u32VmcsField >> 3], u32VmcsField & 7);
2916}
2917
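/*
 * Illustrative sketch, not part of the original source: a worked example of the bitmap
 * consultation above for the guest RIP field (encoding 0x681e): byte 0x681e >> 3 = 0xd03,
 * bit 0x681e & 7 = 6. The function asserts that the guest is in VMX non-root mode. The
 * helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static bool cpumExampleIsVmreadGuestRipIntercepted(PCVMCPU pVCpu)
{
    return CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, VMX_VMCS_GUEST_RIP);
}
#endif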
2918
2919
2920/**
2921 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2922 *
2923 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2924 * @param u16Port The IO port being accessed.
2925 * @param enmIoType The type of IO access.
2926 * @param cbReg The IO operand size in bytes.
2927 * @param cAddrSizeBits The address size bits (16, 32 or 64).
2928 * @param iEffSeg The effective segment number.
2929 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2930 * @param fStrIo Whether this is a string IO instruction.
2931 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2932 * Optional, can be NULL.
2933 */
2934VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2935 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2936 PSVMIOIOEXITINFO pIoExitInfo)
2937{
2938 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2939 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2940
2941 /*
2942 * The IOPM layout:
2943 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
2944 * two 4K pages.
2945 *
2946 * For IO instructions that access more than a single byte, the permission bits
2947 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2948 *
2949 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2950 * we need 3 extra bits beyond the second 4K page.
2951 */
2952 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2953
2954 uint16_t const offIopm = u16Port >> 3;
2955 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2956 uint8_t const cShift = u16Port - (offIopm << 3);
2957 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2958
2959 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2960 Assert(pbIopm);
2961 pbIopm += offIopm;
2962 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2963 if (u16Iopm & fIopmMask)
2964 {
2965 if (pIoExitInfo)
2966 {
2967 static const uint32_t s_auIoOpSize[] =
2968 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2969
2970 static const uint32_t s_auIoAddrSize[] =
2971 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2972
2973 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2974 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2975 pIoExitInfo->n.u1Str = fStrIo;
2976 pIoExitInfo->n.u1Rep = fRep;
2977 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2978 pIoExitInfo->n.u1Type = enmIoType;
2979 pIoExitInfo->n.u16Port = u16Port;
2980 }
2981 return true;
2982 }
2983
2984 /** @todo remove later (for debugging as VirtualBox always traps all IO
2985 * intercepts). */
2986 AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
2987 return false;
2988}
2989
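/*
 * Illustrative sketch, not part of the original source: a worked example of the IOPM
 * lookup above. For a single byte access to I/O port 0x64:
 *   offIopm = 0x64 >> 3 = 12, cShift = 0x64 - (12 << 3) = 4,
 * i.e. bit 4 of IOPM byte 12 decides whether the access is intercepted. The helper
 * name is hypothetical.
 */
#if 0 /* example only, not compiled */
static bool cpumExampleIsPort64Intercepted(void *pvIoBitmap, SVMIOIOTYPE enmIoType)
{
    SVMIOIOEXITINFO IoExitInfo;
    return CPUMIsSvmIoInterceptSet(pvIoBitmap, 0x64 /*u16Port*/, enmIoType, 1 /*cbReg*/,
                                   16 /*cAddrSizeBits*/, 0 /*iEffSeg*/, false /*fRep*/, false /*fStrIo*/,
                                   &IoExitInfo);
}
#endif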
2990
2991/**
2992 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2993 *
2994 * @returns VBox status code.
2995 * @param idMsr The MSR being requested.
2996 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2997 * bitmap for @a idMsr.
2998 * @param puMsrpmBit Where to store the bit offset starting at the byte
2999 * returned in @a pbOffMsrpm.
3000 */
3001VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
3002{
3003 Assert(pbOffMsrpm);
3004 Assert(puMsrpmBit);
3005
3006 /*
3007 * MSRPM Layout:
3008 * Byte offset MSR range
3009 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
3010 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
3011 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
3012 * 0x1800 - 0x1fff Reserved
3013 *
3014 * Each MSR is represented by 2 permission bits (read and write).
3015 */
3016 if (idMsr <= 0x00001fff)
3017 {
3018 /* Pentium-compatible MSRs. */
3019 uint32_t const bitoffMsr = idMsr << 1;
3020 *pbOffMsrpm = bitoffMsr >> 3;
3021 *puMsrpmBit = bitoffMsr & 7;
3022 return VINF_SUCCESS;
3023 }
3024
3025 if ( idMsr >= 0xc0000000
3026 && idMsr <= 0xc0001fff)
3027 {
3028 /* AMD Sixth Generation x86 Processor MSRs. */
3029 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
3030 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
3031 *puMsrpmBit = bitoffMsr & 7;
3032 return VINF_SUCCESS;
3033 }
3034
3035 if ( idMsr >= 0xc0010000
3036 && idMsr <= 0xc0011fff)
3037 {
3038 /* AMD Seventh and Eighth Generation Processor MSRs. */
3039 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3040 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3041 *puMsrpmBit = bitoffMsr & 7;
3042 return VINF_SUCCESS;
3043 }
3044
3045 *pbOffMsrpm = 0;
3046 *puMsrpmBit = 0;
3047 return VERR_OUT_OF_RANGE;
3048}
3049
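/*
 * Illustrative sketch, not part of the original source: a worked example of the MSRPM
 * lookup above for MSR_K6_EFER (0xc0000080), which falls in the second range:
 *   bitoffMsr = (0xc0000080 - 0xc0000000) << 1 = 0x100
 *   byte offset = 0x800 + (0x100 >> 3) = 0x820, bit offset = 0
 * Bit 0 of MSRPM byte 0x820 is the read-intercept bit, bit 1 the write-intercept bit.
 */
#if 0 /* example only, not compiled */
static void cpumExampleEferMsrpmOffset(void)
{
    uint16_t  offMsrpm;
    uint8_t   uMsrpmBit;
    int const rc = CPUMGetSvmMsrpmOffsetAndBit(MSR_K6_EFER, &offMsrpm, &uMsrpmBit);
    Assert(rc == VINF_SUCCESS && offMsrpm == 0x820 && uMsrpmBit == 0);
    RT_NOREF(rc);
}
#endif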
3050
3051/**
3052 * Checks whether the guest is in VMX non-root mode and using EPT paging.
3053 *
3054 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3055 * @param pVCpu The cross context virtual CPU structure.
3056 */
3057VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
3058{
3059 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
3060}
3061
3062
3063/**
3064 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
3065 * nested-guest is in PAE mode.
3066 *
3067 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3068 * @param pVCpu The cross context virtual CPU structure.
3069 */
3070VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3071{
3072 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3073 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3074}
3075
3076
3077/**
3078 * Returns the guest-physical address of the APIC-access page when executing a
3079 * nested-guest.
3080 *
3081 * @returns The APIC-access page guest-physical address.
3082 * @param pVCpu The cross context virtual CPU structure.
3083 */
3084VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3085{
3086 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3087}
3088
3089
3090/**
3091 * Returns whether the given page is the active APIC-access page.
3092 *
3093 * @returns @c true if the page is the active APIC-access page, @c false otherwise.
3094 * @param pVCpu The cross context virtual CPU structure.
3095 * @param GCPhysPage The guest-physical address to check.
3096 *
3097 * @remarks This function does not assume the guest is executing in VMX non-root
3098 * mode or in VMX root-mode. However, it does assert that the VMCS has
3099 * been initialized and the virtual-APIC access VM-execution control is
3100 * enabled.
3101 * @note This is meant to be used by PGM while syncing the page-table entry for
3102 * the APIC-access page. All other queries for the APIC-access page address
3103 * should almost certainly use CPUMGetGuestVmxApicAccessPageAddr() instead!
3104 */
3105VMM_INT_DECL(bool) CPUMIsGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu, RTGCPHYS GCPhysPage)
3106{
3107 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3108 PCVMXVVMCS pVmcs = &pCtx->hwvirt.vmx.Vmcs;
3109 if ( pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fVmx /* VMX CPU feature is enabled for the guest. */
3110 && CPUMIsGuestVmxCurrentVmcsValid(pCtx) /* A VMCS is currently active. */
3111 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)) /* Virtual-APIC access VM-execution control is set. */
3112 {
3113 Assert(!(pVmcs->u64AddrApicAccess.u & X86_PAGE_4K_OFFSET_MASK)); /* Intel spec. mandates that this is 4K aligned. */
3114 Assert(!(GCPhysPage & GUEST_PAGE_OFFSET_MASK)); /* Caller must be passing us an aligned page. */
3115 return pVmcs->u64AddrApicAccess.u == GCPhysPage;
3116 }
3117 return false;
3118}
3119