VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@60716

Last change on this file since 60716 was 60664, checked in by vboxsync, 9 years ago

VMM,ConsoleImpl2: Added 386 profile, adding IEM code for some obvious 386isms (EFLAGS and CR0/MSW).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 85.8 KB
1/* $Id: CPUMAllRegs.cpp 60664 2016-04-22 23:35:07Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Keep stack frame pointers here (turn off MSVC frame-pointer omission in release builds). */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
137
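/*
 * Worked example (sketch): in the real and V8086 mode branches above the
 * hidden base is simply the selector shifted left by four bits, so a
 * selector value of 0x1234 yields
 *
 *     pSReg->u64Base = (uint32_t)0x1234 << 4;   // 0x00012340
 *
 * and in V8086 mode the limit is additionally forced to 0xffff.
 */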
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as a parameter will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
308
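/*
 * Usage sketch (GCPtrEntry and GCPtrStack are hypothetical raw-mode context
 * addresses set up elsewhere): before resuming raw-mode execution a caller
 * would point the hyper context at its entry code and stack, e.g.
 *
 *     CPUMSetHyperState(pVCpu, GCPtrEntry, GCPtrStack, 0, 0);
 *
 * with the last two arguments being EAX and EDX, and EFLAGS ending up as
 * X86_EFL_1, i.e. interrupts disabled and IOPL 0.
 */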
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
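/*
 * Note (sketch): in ring-3 MAYBE_LOAD_DRx expands to nothing, so a call such
 * as
 *
 *     CPUMSetHyperDR0(pVCpu, uNewDr0);
 *
 * only records the value in the hyper context there; the actual hardware DR0
 * load happens later from ring-0 or raw-mode context.
 */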
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_IEM
581# ifdef VBOX_WITH_RAW_MODE_NOT_R0
582 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
583 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
584# endif
585#endif
586 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
587 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
588 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
589 return VINF_SUCCESS; /* formality, consider it void. */
590}
591
592VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
593{
594#ifdef VBOX_WITH_IEM
595# ifdef VBOX_WITH_RAW_MODE_NOT_R0
596 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
597 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
598# endif
599#endif
600 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
601 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
602 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
603 return VINF_SUCCESS; /* formality, consider it void. */
604}
605
606VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
607{
608#ifdef VBOX_WITH_IEM
609# ifdef VBOX_WITH_RAW_MODE_NOT_R0
610 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
611 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
612# endif
613#endif
614 pVCpu->cpum.s.Guest.tr.Sel = tr;
615 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
616 return VINF_SUCCESS; /* formality, consider it void. */
617}
618
619VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
620{
621#ifdef VBOX_WITH_IEM
622# ifdef VBOX_WITH_RAW_MODE_NOT_R0
623 if ( ( ldtr != 0
624 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
625 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
626 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
627# endif
628#endif
629 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
630 /* The caller will set more hidden bits if it has them. */
631 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
632 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
633 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
634 return VINF_SUCCESS; /* formality, consider it void. */
635}
636
637
638/**
639 * Set the guest CR0.
640 *
641 * When called in GC, the hyper CR0 may be updated if that is
642 * required. The caller only has to take special action if AM,
643 * WP, PG or PE changes.
644 *
645 * @returns VINF_SUCCESS (consider it void).
646 * @param pVCpu The cross context virtual CPU structure.
647 * @param cr0 The new CR0 value.
648 */
649VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
650{
651#ifdef IN_RC
652 /*
653 * Check if we need to change hypervisor CR0 because
654 * of math stuff.
655 */
656 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
657 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
658 {
659 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
660 {
661 /*
662 * We haven't saved the host FPU state yet, so TS and MP are both set
663 * and EM should be reflecting the guest EM (it always does this).
664 */
665 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
666 {
667 uint32_t HyperCR0 = ASMGetCR0();
668 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
669 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
670 HyperCR0 &= ~X86_CR0_EM;
671 HyperCR0 |= cr0 & X86_CR0_EM;
672 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
673 ASMSetCR0(HyperCR0);
674 }
675# ifdef VBOX_STRICT
676 else
677 {
678 uint32_t HyperCR0 = ASMGetCR0();
679 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
680 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
681 }
682# endif
683 }
684 else
685 {
686 /*
687 * Already saved the state, so we're just mirroring
688 * the guest flags.
689 */
690 uint32_t HyperCR0 = ASMGetCR0();
691 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
692 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
693 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
694 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
695 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
696 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
697 ASMSetCR0(HyperCR0);
698 }
699 }
700#endif /* IN_RC */
701
702 /*
703 * Check for changes causing TLB flushes (for REM).
704 * The caller is responsible for calling PGM when appropriate.
705 */
706 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
707 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
708 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
709 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
710
711 /*
712 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
713 */
714 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
715 PGMCr0WpEnabled(pVCpu);
716
717 /* The ET flag is settable on a 386 and hardwired on 486+. */
718 if ( !(cr0 & X86_CR0_ET)
719 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
720 cr0 |= X86_CR0_ET;
721
722 pVCpu->cpum.s.Guest.cr0 = cr0;
723 return VINF_SUCCESS;
724}
725
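/*
 * Example (sketch): enabling paging on a guest that previously ran with
 * CR0.PG clear marks a global TLB flush for REM in addition to the usual
 * CPUM_CHANGED_CR0; the caller still has to notify PGM itself:
 *
 *     uint64_t uCr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, uCr0 | X86_CR0_PE | X86_CR0_PG);
 *     Assert(pVCpu->cpum.s.fChanged & CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 */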
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 /*
745 * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
746 */
747 if ( (cr4 & X86_CR4_OSXSAVE)
748 != (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
749 {
750 PVM pVM = pVCpu->CTX_SUFF(pVM);
751 if (cr4 & X86_CR4_OSXSAVE)
752 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
753 else
754 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
755 }
756
757 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
758 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
759 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
760
761 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
762 pVCpu->cpum.s.Guest.cr4 = cr4;
763 return VINF_SUCCESS;
764}
765
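/*
 * Example (sketch, assuming the host supports XSAVE): because CR4.OSXSAVE is
 * mirrored into CPUID(1).ECX[27] above, after
 *
 *     CPUMSetGuestCR4(pVCpu, CPUMGetGuestCR4(pVCpu) | X86_CR4_OSXSAVE);
 *
 * a subsequent CPUMGetGuestCpuId(pVCpu, 1, 0, ...) query will report
 * X86_CPUID_FEATURE_ECX_OSXSAVE in ECX.
 */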
766
767VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
768{
769 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
770 return VINF_SUCCESS;
771}
772
773
774VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
775{
776 pVCpu->cpum.s.Guest.eip = eip;
777 return VINF_SUCCESS;
778}
779
780
781VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
782{
783 pVCpu->cpum.s.Guest.eax = eax;
784 return VINF_SUCCESS;
785}
786
787
788VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
789{
790 pVCpu->cpum.s.Guest.ebx = ebx;
791 return VINF_SUCCESS;
792}
793
794
795VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
796{
797 pVCpu->cpum.s.Guest.ecx = ecx;
798 return VINF_SUCCESS;
799}
800
801
802VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
803{
804 pVCpu->cpum.s.Guest.edx = edx;
805 return VINF_SUCCESS;
806}
807
808
809VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
810{
811 pVCpu->cpum.s.Guest.esp = esp;
812 return VINF_SUCCESS;
813}
814
815
816VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
817{
818 pVCpu->cpum.s.Guest.ebp = ebp;
819 return VINF_SUCCESS;
820}
821
822
823VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
824{
825 pVCpu->cpum.s.Guest.esi = esi;
826 return VINF_SUCCESS;
827}
828
829
830VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
831{
832 pVCpu->cpum.s.Guest.edi = edi;
833 return VINF_SUCCESS;
834}
835
836
837VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
838{
839 pVCpu->cpum.s.Guest.ss.Sel = ss;
840 return VINF_SUCCESS;
841}
842
843
844VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
845{
846 pVCpu->cpum.s.Guest.cs.Sel = cs;
847 return VINF_SUCCESS;
848}
849
850
851VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
852{
853 pVCpu->cpum.s.Guest.ds.Sel = ds;
854 return VINF_SUCCESS;
855}
856
857
858VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
859{
860 pVCpu->cpum.s.Guest.es.Sel = es;
861 return VINF_SUCCESS;
862}
863
864
865VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
866{
867 pVCpu->cpum.s.Guest.fs.Sel = fs;
868 return VINF_SUCCESS;
869}
870
871
872VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
873{
874 pVCpu->cpum.s.Guest.gs.Sel = gs;
875 return VINF_SUCCESS;
876}
877
878
879VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
880{
881 pVCpu->cpum.s.Guest.msrEFER = val;
882}
883
884
885VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
886{
887 if (pcbLimit)
888 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
889 return pVCpu->cpum.s.Guest.idtr.pIdt;
890}
891
892
893VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
894{
895 if (pHidden)
896 *pHidden = pVCpu->cpum.s.Guest.tr;
897 return pVCpu->cpum.s.Guest.tr.Sel;
898}
899
900
901VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.cs.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.ds.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.es.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.fs.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.gs.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
932{
933 return pVCpu->cpum.s.Guest.ss.Sel;
934}
935
936
937VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
938{
939 return pVCpu->cpum.s.Guest.ldtr.Sel;
940}
941
942
943VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
944{
945 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
946 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
947 return pVCpu->cpum.s.Guest.ldtr.Sel;
948}
949
950
951VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
952{
953 return pVCpu->cpum.s.Guest.cr0;
954}
955
956
957VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.cr2;
960}
961
962
963VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
964{
965 return pVCpu->cpum.s.Guest.cr3;
966}
967
968
969VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
970{
971 return pVCpu->cpum.s.Guest.cr4;
972}
973
974
975VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
976{
977 uint64_t u64;
978 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
979 if (RT_FAILURE(rc))
980 u64 = 0;
981 return u64;
982}
983
984
985VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
986{
987 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
988}
989
990
991VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
992{
993 return pVCpu->cpum.s.Guest.eip;
994}
995
996
997VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
998{
999 return pVCpu->cpum.s.Guest.rip;
1000}
1001
1002
1003VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1004{
1005 return pVCpu->cpum.s.Guest.eax;
1006}
1007
1008
1009VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1010{
1011 return pVCpu->cpum.s.Guest.ebx;
1012}
1013
1014
1015VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1016{
1017 return pVCpu->cpum.s.Guest.ecx;
1018}
1019
1020
1021VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1022{
1023 return pVCpu->cpum.s.Guest.edx;
1024}
1025
1026
1027VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1028{
1029 return pVCpu->cpum.s.Guest.esi;
1030}
1031
1032
1033VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1034{
1035 return pVCpu->cpum.s.Guest.edi;
1036}
1037
1038
1039VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1040{
1041 return pVCpu->cpum.s.Guest.esp;
1042}
1043
1044
1045VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1046{
1047 return pVCpu->cpum.s.Guest.ebp;
1048}
1049
1050
1051VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1052{
1053 return pVCpu->cpum.s.Guest.eflags.u32;
1054}
1055
1056
1057VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1058{
1059 switch (iReg)
1060 {
1061 case DISCREG_CR0:
1062 *pValue = pVCpu->cpum.s.Guest.cr0;
1063 break;
1064
1065 case DISCREG_CR2:
1066 *pValue = pVCpu->cpum.s.Guest.cr2;
1067 break;
1068
1069 case DISCREG_CR3:
1070 *pValue = pVCpu->cpum.s.Guest.cr3;
1071 break;
1072
1073 case DISCREG_CR4:
1074 *pValue = pVCpu->cpum.s.Guest.cr4;
1075 break;
1076
1077 case DISCREG_CR8:
1078 {
1079 uint8_t u8Tpr;
1080 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1081 if (RT_FAILURE(rc))
1082 {
1083 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1084 *pValue = 0;
1085 return rc;
1086 }
1087 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into cr8; bits 3-0 are ignored. */
1088 break;
1089 }
1090
1091 default:
1092 return VERR_INVALID_PARAMETER;
1093 }
1094 return VINF_SUCCESS;
1095}
1096
1097
1098VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1099{
1100 return pVCpu->cpum.s.Guest.dr[0];
1101}
1102
1103
1104VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1105{
1106 return pVCpu->cpum.s.Guest.dr[1];
1107}
1108
1109
1110VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1111{
1112 return pVCpu->cpum.s.Guest.dr[2];
1113}
1114
1115
1116VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1117{
1118 return pVCpu->cpum.s.Guest.dr[3];
1119}
1120
1121
1122VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1123{
1124 return pVCpu->cpum.s.Guest.dr[6];
1125}
1126
1127
1128VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1129{
1130 return pVCpu->cpum.s.Guest.dr[7];
1131}
1132
1133
1134VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1135{
1136 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1137 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1138 if (iReg == 4 || iReg == 5)
1139 iReg += 2;
1140 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1141 return VINF_SUCCESS;
1142}
1143
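/*
 * Example (sketch): because DR4/DR5 are aliased to DR6/DR7 above, these two
 * lookups return the same value:
 *
 *     uint64_t uDr4, uDr6;
 *     CPUMGetGuestDRx(pVCpu, 4, &uDr4);
 *     CPUMGetGuestDRx(pVCpu, 6, &uDr6);
 *     Assert(uDr4 == uDr6);
 */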
1144
1145VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1146{
1147 return pVCpu->cpum.s.Guest.msrEFER;
1148}
1149
1150
1151/**
1152 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1153 *
1154 * @returns Pointer to the leaf if found, NULL if not.
1155 *
1156 * @param pVM The cross context VM structure.
1157 * @param uLeaf The leaf to get.
1158 */
1159PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1160{
1161 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1162 if (iEnd)
1163 {
1164 unsigned iStart = 0;
1165 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1166 for (;;)
1167 {
1168 unsigned i = iStart + (iEnd - iStart) / 2U;
1169 if (uLeaf < paLeaves[i].uLeaf)
1170 {
1171 if (i <= iStart)
1172 return NULL;
1173 iEnd = i;
1174 }
1175 else if (uLeaf > paLeaves[i].uLeaf)
1176 {
1177 i += 1;
1178 if (i >= iEnd)
1179 return NULL;
1180 iStart = i;
1181 }
1182 else
1183 {
1184 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1185 return &paLeaves[i];
1186
1187 /* This shouldn't normally happen. But in case it does due
1188 to user configuration overrides or something, just return the
1189 first sub-leaf. */
1190 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1191 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1192 while ( paLeaves[i].uSubLeaf != 0
1193 && i > 0
1194 && uLeaf == paLeaves[i - 1].uLeaf)
1195 i--;
1196 return &paLeaves[i];
1197 }
1198 }
1199 }
1200
1201 return NULL;
1202}
1203
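/*
 * Usage sketch: the leaf array is kept sorted by leaf number, which is what
 * makes the binary search above valid. A plain lookup of the standard
 * feature leaf looks like this:
 *
 *     PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
 *     if (pLeaf)
 *         Log(("Leaf 1: edx=%#x ecx=%#x\n", pLeaf->uEdx, pLeaf->uEcx));
 */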
1204
1205/**
1206 * Looks up a CPUID leaf in the CPUID leaf array.
1207 *
1208 * @returns Pointer to the leaf if found, NULL if not.
1209 *
1210 * @param pVM The cross context VM structure.
1211 * @param uLeaf The leaf to get.
1212 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1213 * isn't.
1214 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1215 */
1216PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1217{
1218 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1219 if (iEnd)
1220 {
1221 unsigned iStart = 0;
1222 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1223 for (;;)
1224 {
1225 unsigned i = iStart + (iEnd - iStart) / 2U;
1226 if (uLeaf < paLeaves[i].uLeaf)
1227 {
1228 if (i <= iStart)
1229 return NULL;
1230 iEnd = i;
1231 }
1232 else if (uLeaf > paLeaves[i].uLeaf)
1233 {
1234 i += 1;
1235 if (i >= iEnd)
1236 return NULL;
1237 iStart = i;
1238 }
1239 else
1240 {
1241 uSubLeaf &= paLeaves[i].fSubLeafMask;
1242 if (uSubLeaf == paLeaves[i].uSubLeaf)
1243 *pfExactSubLeafHit = true;
1244 else
1245 {
1246 /* Find the right subleaf. We return the last one before
1247 uSubLeaf if we don't find an exact match. */
1248 if (uSubLeaf < paLeaves[i].uSubLeaf)
1249 while ( i > 0
1250 && uLeaf == paLeaves[i - 1].uLeaf
1251 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1252 i--;
1253 else
1254 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1255 && uLeaf == paLeaves[i + 1].uLeaf
1256 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1257 i++;
1258 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1259 }
1260 return &paLeaves[i];
1261 }
1262 }
1263 }
1264
1265 *pfExactSubLeafHit = false;
1266 return NULL;
1267}
1268
1269
1270/**
1271 * Gets a CPUID leaf.
1272 *
1273 * @param pVCpu The cross context virtual CPU structure.
1274 * @param uLeaf The CPUID leaf to get.
1275 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1276 * @param pEax Where to store the EAX value.
1277 * @param pEbx Where to store the EBX value.
1278 * @param pEcx Where to store the ECX value.
1279 * @param pEdx Where to store the EDX value.
1280 */
1281VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1282 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1283{
1284 bool fExactSubLeafHit;
1285 PVM pVM = pVCpu->CTX_SUFF(pVM);
1286 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1287 if (pLeaf)
1288 {
1289 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1290 if (fExactSubLeafHit)
1291 {
1292 *pEax = pLeaf->uEax;
1293 *pEbx = pLeaf->uEbx;
1294 *pEcx = pLeaf->uEcx;
1295 *pEdx = pLeaf->uEdx;
1296
1297 /*
1298 * Deal with CPU specific information (currently only APIC ID).
1299 */
1300 if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
1301 {
1302 if (uLeaf == 1)
1303 {
1304 /* EBX: Bits 31-24: Initial APIC ID. */
1305 Assert(pVCpu->idCpu <= 255);
1306 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1307 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1308
1309 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1310 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1311 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1312 }
1313 else if (uLeaf == 0xb)
1314 {
1315 /* EDX: Initial extended APIC ID. */
1316 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1317 *pEdx = pVCpu->idCpu;
1318 }
1319 else if (uLeaf == UINT32_C(0x8000001e))
1320 {
1321 /* EAX: Initial extended APIC ID. */
1322 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1323 *pEax = pVCpu->idCpu;
1324 }
1325 else
1326 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1327 }
1328 }
1329 /*
1330 * Out-of-range sub-leaves aren't easy or pretty to emulate exactly,
1331 * but we do the best we can here...
1332 */
1333 else
1334 {
1335 *pEax = *pEbx = *pEcx = *pEdx = 0;
1336 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1337 {
1338 *pEcx = uSubLeaf & 0xff;
1339 *pEdx = pVCpu->idCpu;
1340 }
1341 }
1342 }
1343 else
1344 {
1345 /*
1346 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1347 */
1348 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1349 {
1350 default:
1351 AssertFailed();
1352 case CPUMUNKNOWNCPUID_DEFAULTS:
1353 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1354 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1355 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1356 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1357 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1358 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1359 break;
1360 case CPUMUNKNOWNCPUID_PASSTHRU:
1361 *pEax = uLeaf;
1362 *pEbx = 0;
1363 *pEcx = uSubLeaf;
1364 *pEdx = 0;
1365 break;
1366 }
1367 }
1368 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1369}
1370
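/*
 * Usage sketch: querying leaf 1 and extracting the initial APIC ID that the
 * code above patches into EBX[31:24] (valid when the leaf is flagged as
 * containing the APIC ID):
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *     uint8_t const idApic = (uint8_t)(uEbx >> 24);  // equals pVCpu->idCpu
 */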
1371
1372/**
1373 * Sets a CPUID feature bit.
1374 *
1375 * @param pVM The cross context VM structure.
1376 * @param enmFeature The feature to set.
1377 */
1378VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1379{
1380 PCPUMCPUIDLEAF pLeaf;
1381
1382 switch (enmFeature)
1383 {
1384 /*
1385 * Set the APIC bit in both feature masks.
1386 */
1387 case CPUMCPUIDFEATURE_APIC:
1388 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1389 if (pLeaf)
1390 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1391
1392 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1393 if ( pLeaf
1394 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1395 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1396
1397 pVM->cpum.s.GuestFeatures.fApic = 1;
1398 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
1399 break;
1400
1401 /*
1402 * Set the x2APIC bit in the standard feature mask.
1403 */
1404 case CPUMCPUIDFEATURE_X2APIC:
1405 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1406 if (pLeaf)
1407 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1408 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1409 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1410 break;
1411
1412 /*
1413 * Set the sysenter/sysexit bit in the standard feature mask.
1414 * Assumes the caller knows what it's doing! (host must support these)
1415 */
1416 case CPUMCPUIDFEATURE_SEP:
1417 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1418 {
1419 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1420 return;
1421 }
1422
1423 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1424 if (pLeaf)
1425 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1426 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1427 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1428 break;
1429
1430 /*
1431 * Set the syscall/sysret bit in the extended feature mask.
1432 * Assumes the caller knows what it's doing! (host must support these)
1433 */
1434 case CPUMCPUIDFEATURE_SYSCALL:
1435 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1436 if ( !pLeaf
1437 || !pVM->cpum.s.HostFeatures.fSysCall)
1438 {
1439#if HC_ARCH_BITS == 32
1440 /* Intel doesn't seem to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
1441 32-bit mode, even when the CPU is capable of doing so in
1442 64-bit mode. Long mode requires syscall support. */
1443 if (!pVM->cpum.s.HostFeatures.fLongMode)
1444#endif
1445 {
1446 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1447 return;
1448 }
1449 }
1450
1451 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1452 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1453 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1454 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1455 break;
1456
1457 /*
1458 * Set the PAE bit in both feature masks.
1459 * Assumes the caller knows what it's doing! (host must support these)
1460 */
1461 case CPUMCPUIDFEATURE_PAE:
1462 if (!pVM->cpum.s.HostFeatures.fPae)
1463 {
1464 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1465 return;
1466 }
1467
1468 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1469 if (pLeaf)
1470 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1471
1472 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1473 if ( pLeaf
1474 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1475 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1476
1477 pVM->cpum.s.GuestFeatures.fPae = 1;
1478 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1479 break;
1480
1481 /*
1482 * Set the LONG MODE bit in the extended feature mask.
1483 * Assumes the caller knows what it's doing! (host must support these)
1484 */
1485 case CPUMCPUIDFEATURE_LONG_MODE:
1486 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1487 if ( !pLeaf
1488 || !pVM->cpum.s.HostFeatures.fLongMode)
1489 {
1490 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1491 return;
1492 }
1493
1494 /* Valid for both Intel and AMD. */
1495 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1496 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1497 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1498 break;
1499
1500 /*
1501 * Set the NX/XD bit in the extended feature mask.
1502 * Assumes the caller knows what it's doing! (host must support these)
1503 */
1504 case CPUMCPUIDFEATURE_NX:
1505 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1506 if ( !pLeaf
1507 || !pVM->cpum.s.HostFeatures.fNoExecute)
1508 {
1509 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1510 return;
1511 }
1512
1513 /* Valid for both Intel and AMD. */
1514 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1515 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1516 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1517 break;
1518
1519
1520 /*
1521 * Set the LAHF/SAHF support in 64-bit mode.
1522 * Assumes the caller knows what it's doing! (host must support this)
1523 */
1524 case CPUMCPUIDFEATURE_LAHF:
1525 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1526 if ( !pLeaf
1527 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1528 {
1529 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1530 return;
1531 }
1532
1533 /* Valid for both Intel and AMD. */
1534 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1535 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1536 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1537 break;
1538
1539 /*
1540 * Set the page attribute table bit. This is an alternative page-level
1541 * cache control that doesn't matter much when everything is
1542 * virtualized, though it may when passing through device memory.
1543 */
1544 case CPUMCPUIDFEATURE_PAT:
1545 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1546 if (pLeaf)
1547 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1548
1549 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1550 if ( pLeaf
1551 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1552 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1553
1554 pVM->cpum.s.GuestFeatures.fPat = 1;
1555 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1556 break;
1557
1558 /*
1559 * Set the RDTSCP support bit.
1560 * Assumes the caller knows what it's doing! (host must support this)
1561 */
1562 case CPUMCPUIDFEATURE_RDTSCP:
1563 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1564 if ( !pLeaf
1565 || !pVM->cpum.s.HostFeatures.fRdTscP
1566 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1567 {
1568 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1569 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1570 return;
1571 }
1572
1573 /* Valid for both Intel and AMD. */
1574 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1575 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1576 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1577 break;
1578
1579 /*
1580 * Set the Hypervisor Present bit in the standard feature mask.
1581 */
1582 case CPUMCPUIDFEATURE_HVP:
1583 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1584 if (pLeaf)
1585 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1586 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1587 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1588 break;
1589
1590 /*
1591 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1592 * This currently includes the Present bit and MWAITBREAK bit as well.
1593 */
1594 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1595 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1596 if ( !pLeaf
1597 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1598 {
1599 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1600 return;
1601 }
1602
1603 /* Valid for both Intel and AMD. */
1604 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1605 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1606 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1607 break;
1608
1609 /*
1610 * OSXSAVE - only used from CPUMSetGuestCR4.
1611 */
1612 case CPUMCPUIDFEATURE_OSXSAVE:
1613 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1614
1615 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1616 AssertLogRelReturnVoid(pLeaf);
1617
1618 /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
1619 if (pVM->cCpus == 1)
1620 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
1621 /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
1622 else
1623 ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
1624 break;
1625
1626 default:
1627 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1628 break;
1629 }
1630
1631 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1632 {
1633 PVMCPU pVCpu = &pVM->aCpus[i];
1634 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1635 }
1636}
1637
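/*
 * Usage sketch: code that wires up an emulated feature enables the matching
 * CPUID bit through this helper, e.g.
 *
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
 *
 * which updates leaf 0x00000001 (and 0x80000001 for AMD guests) and marks
 * CPUM_CHANGED_CPUID on every VCpu.
 */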
1638
1639/**
1640 * Queries a CPUID feature bit.
1641 *
1642 * @returns boolean for feature presence
1643 * @param pVM The cross context VM structure.
1644 * @param enmFeature The feature to query.
1645 */
1646VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1647{
1648 switch (enmFeature)
1649 {
1650 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1651 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1652 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1653 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1654 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1655 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1656 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1657 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1658 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1659 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1660 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1661 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1662
1663 case CPUMCPUIDFEATURE_OSXSAVE:
1664 case CPUMCPUIDFEATURE_INVALID:
1665 case CPUMCPUIDFEATURE_32BIT_HACK:
1666 break;
1667 }
1668 AssertFailed();
1669 return false;
1670}
1671
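/*
 * Usage sketch: callers typically gate optional behaviour on these bits,
 * for instance only letting the guest set EFER.NXE when
 *
 *     CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX)
 *
 * returns true.
 */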
1672
1673/**
1674 * Clears a CPUID feature bit.
1675 *
1676 * @param pVM The cross context VM structure.
1677 * @param enmFeature The feature to clear.
1678 */
1679VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1680{
1681 PCPUMCPUIDLEAF pLeaf;
1682 switch (enmFeature)
1683 {
1684 case CPUMCPUIDFEATURE_APIC:
1685 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1686 if (pLeaf)
1687 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1688
1689 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1690 if ( pLeaf
1691 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1692 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1693
1694 pVM->cpum.s.GuestFeatures.fApic = 0;
1695 Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
1696 break;
1697
1698 case CPUMCPUIDFEATURE_X2APIC:
1699 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1700 if (pLeaf)
1701 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1702 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1703 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1704 break;
1705
1706 case CPUMCPUIDFEATURE_PAE:
1707 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1708 if (pLeaf)
1709 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1710
1711 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1712 if ( pLeaf
1713 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1714 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1715
1716 pVM->cpum.s.GuestFeatures.fPae = 0;
1717 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1718 break;
1719
1720 case CPUMCPUIDFEATURE_PAT:
1721 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1722 if (pLeaf)
1723 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1724
1725 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1726 if ( pLeaf
1727 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1728 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1729
1730 pVM->cpum.s.GuestFeatures.fPat = 0;
1731 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1732 break;
1733
1734 case CPUMCPUIDFEATURE_LONG_MODE:
1735 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1736 if (pLeaf)
1737 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1738 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1739 break;
1740
1741 case CPUMCPUIDFEATURE_LAHF:
1742 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1743 if (pLeaf)
1744 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1745 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1746 break;
1747
1748 case CPUMCPUIDFEATURE_RDTSCP:
1749 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1750 if (pLeaf)
1751 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1752 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1753 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1754 break;
1755
1756 case CPUMCPUIDFEATURE_HVP:
1757 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1758 if (pLeaf)
1759 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1760 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1761 break;
1762
1763 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1764 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1765 if (pLeaf)
1766 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1767 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1768 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1769 break;
1770
1771 /*
1772 * OSXSAVE - only used from CPUMSetGuestCR4.
1773 */
1774 case CPUMCPUIDFEATURE_OSXSAVE:
1775 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1776
1777 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1778 AssertLogRelReturnVoid(pLeaf);
1779
1780 /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
1781 if (pVM->cCpus == 1)
1782 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
1783 /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
1784 break;
1785
1786
1787 default:
1788 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1789 break;
1790 }
1791
1792 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1793 {
1794 PVMCPU pVCpu = &pVM->aCpus[i];
1795 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1796 }
1797}
1798
1799
1800/**
1801 * Gets the host CPU vendor.
1802 *
1803 * @returns CPU vendor.
1804 * @param pVM The cross context VM structure.
1805 */
1806VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1807{
1808 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1809}
1810
1811
1812/**
1813 * Gets the CPU vendor.
1814 *
1815 * @returns CPU vendor.
1816 * @param pVM The cross context VM structure.
1817 */
1818VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1819{
1820 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1821}
1822
1823
1824VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1825{
1826 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1827 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1828}
1829
1830
1831VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1832{
1833 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1834 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1835}
1836
1837
1838VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1839{
1840 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1841 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1842}
1843
1844
1845VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1846{
1847 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1848 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1849}
1850
1851
1852VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1853{
1854 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1855 return VINF_SUCCESS; /* No need to recalc. */
1856}
1857
1858
1859VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1860{
1861 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1862 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1863}
1864
1865
1866VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1867{
1868 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1869 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1870 if (iReg == 4 || iReg == 5)
1871 iReg += 2;
1872 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1873 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1874}
1875
1876
1877/**
1878 * Recalculates the hypervisor DRx register values based on current guest
1879 * registers and DBGF breakpoints, updating changed registers depending on the
1880 * context.
1881 *
1882 * This is called whenever a guest DRx register is modified (any context) and
1883 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1884 *
1885 * In raw-mode context this function will reload any (hyper) DRx registers that
1886 * come out with a different value. It may also have to save the host debug
1887 * registers if that hasn't been done already. In this context though, we'll
1888 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1889 * are only important when breakpoints are actually enabled.
1890 *
1891 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1892 * reloaded by the HM code if it changes. Furthermore, we will only use the
1893 * combined register set when the VBox debugger is actually using hardware BPs;
1894 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1895 * concern us here).
1896 *
1897 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1898 * all the time.
1899 *
1900 * @returns VINF_SUCCESS.
1901 * @param pVCpu The cross context virtual CPU structure.
1902 * @param iGstReg The guest debug register number that was modified.
1903 * UINT8_MAX if not a guest register.
1904 * @param fForceHyper Used in HM to force hyper registers because of single
1905 * stepping.
1906 */
1907VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1908{
1909 PVM pVM = pVCpu->CTX_SUFF(pVM);
1910
1911 /*
1912 * Compare the DR7s first.
1913 *
1914 * We only care about the enabled flags. GD is virtualized when we
1915 * dispatch the #DB, so we never enable it. The DBGF DR7 value will
1916 * always have the LE and GE bits set, so no need to check and disable
1917 * stuff if they're cleared like we have to for the guest DR7.
1918 */
1919 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1920 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1921 uGstDr7 = 0;
1922 else if (!(uGstDr7 & X86_DR7_LE))
1923 uGstDr7 &= ~X86_DR7_LE_ALL;
1924 else if (!(uGstDr7 & X86_DR7_GE))
1925 uGstDr7 &= ~X86_DR7_GE_ALL;
1926
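 /* Worked example (sketch): a guest DR7 of 0x00000401 (L0 set, LE and GE both
    clear) is reduced to zero by the checks above, so such a breakpoint will
    not be merged into the hypervisor DR7 below. */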
1927 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1928
1929#ifdef IN_RING0
1930 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1931 fForceHyper = true;
1932#endif
1933 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1934 {
1935 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1936#ifdef IN_RC
1937 bool const fHmEnabled = false;
1938#elif defined(IN_RING3)
1939 bool const fHmEnabled = HMIsEnabled(pVM);
1940#endif
1941
1942 /*
1943 * Ok, something is enabled. Recalc each of the breakpoints, taking
1944 * the VM debugger ones over the guest ones. In raw-mode context we will
1945 * not allow breakpoints with values inside the hypervisor area.
1946 */
1947 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1948
1949 /* bp 0 */
1950 RTGCUINTREG uNewDr0;
1951 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1952 {
1953 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1954 uNewDr0 = DBGFBpGetDR0(pVM);
1955 }
1956 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1957 {
1958 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1959#ifndef IN_RING0
1960 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1961 uNewDr0 = 0;
1962 else
1963#endif
1964 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1965 }
1966 else
1967 uNewDr0 = 0;
1968
1969 /* bp 1 */
1970 RTGCUINTREG uNewDr1;
1971 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1972 {
1973 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1974 uNewDr1 = DBGFBpGetDR1(pVM);
1975 }
1976 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1977 {
1978 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1979#ifndef IN_RING0
1980 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1981 uNewDr1 = 0;
1982 else
1983#endif
1984 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1985 }
1986 else
1987 uNewDr1 = 0;
1988
1989 /* bp 2 */
1990 RTGCUINTREG uNewDr2;
1991 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1992 {
1993 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1994 uNewDr2 = DBGFBpGetDR2(pVM);
1995 }
1996 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1997 {
1998 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1999#ifndef IN_RING0
2000 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2001 uNewDr2 = 0;
2002 else
2003#endif
2004 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2005 }
2006 else
2007 uNewDr2 = 0;
2008
2009 /* bp 3 */
2010 RTGCUINTREG uNewDr3;
2011 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2012 {
2013 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2014 uNewDr3 = DBGFBpGetDR3(pVM);
2015 }
2016 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2017 {
2018 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2019#ifndef IN_RING0
2020 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2021 uNewDr3 = 0;
2022 else
2023#endif
2024 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2025 }
2026 else
2027 uNewDr3 = 0;
2028
2029 /*
2030 * Apply the updates.
2031 */
2032#ifdef IN_RC
2033 /* Make sure to save host registers first. */
2034 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2035 {
2036 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2037 {
2038 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2039 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2040 }
2041 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2042 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2043 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2044 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2045 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2046
2047 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2048 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2049 ASMSetDR0(uNewDr0);
2050 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2051 ASMSetDR1(uNewDr1);
2052 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2053 ASMSetDR2(uNewDr2);
2054 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2055 ASMSetDR3(uNewDr3);
2056 ASMSetDR6(X86_DR6_INIT_VAL);
2057 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2058 ASMSetDR7(uNewDr7);
2059 }
2060 else
2061#endif
2062 {
2063 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2064 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2065 CPUMSetHyperDR3(pVCpu, uNewDr3);
2066 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2067 CPUMSetHyperDR2(pVCpu, uNewDr2);
2068 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2069 CPUMSetHyperDR1(pVCpu, uNewDr1);
2070 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2071 CPUMSetHyperDR0(pVCpu, uNewDr0);
2072 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2073 CPUMSetHyperDR7(pVCpu, uNewDr7);
2074 }
2075 }
2076#ifdef IN_RING0
2077 else if (CPUMIsGuestDebugStateActive(pVCpu))
2078 {
2079 /*
2080 * Reload the register that was modified. Normally this won't happen
2081 * as we won't intercept DRx writes when not having the hyper debug
2082 * state loaded, but in case we do for some reason we'll simply deal
2083 * with it.
2084 */
2085 switch (iGstReg)
2086 {
2087 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2088 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2089 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2090 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2091 default:
2092 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2093 }
2094 }
2095#endif
2096 else
2097 {
2098 /*
2099 * No active debug state any more. In raw-mode this means we have to
2100 * make sure DR7 has everything disabled now, if we armed it already.
2101 * In ring-0 we might end up here when just single stepping.
2102 */
2103#if defined(IN_RC) || defined(IN_RING0)
2104 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2105 {
2106# ifdef IN_RC
2107 ASMSetDR7(X86_DR7_INIT_VAL);
2108# endif
2109 if (pVCpu->cpum.s.Hyper.dr[0])
2110 ASMSetDR0(0);
2111 if (pVCpu->cpum.s.Hyper.dr[1])
2112 ASMSetDR1(0);
2113 if (pVCpu->cpum.s.Hyper.dr[2])
2114 ASMSetDR2(0);
2115 if (pVCpu->cpum.s.Hyper.dr[3])
2116 ASMSetDR3(0);
2117 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2118 }
2119#endif
2120 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2121
2122 /* Clear all the registers. */
2123 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2124 pVCpu->cpum.s.Hyper.dr[3] = 0;
2125 pVCpu->cpum.s.Hyper.dr[2] = 0;
2126 pVCpu->cpum.s.Hyper.dr[1] = 0;
2127 pVCpu->cpum.s.Hyper.dr[0] = 0;
2128
2129 }
2130 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2131 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2132 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2133 pVCpu->cpum.s.Hyper.dr[7]));
2134
2135 return VINF_SUCCESS;
2136}
2137
2138
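/*
 * Illustrative sketch only (not part of the original file): how a MOV-to-DRx
 * emulation path might end up triggering the recalculation above. The helper
 * name cpumExampleEmulateMovToDr is hypothetical; CPUMSetGuestDRx stores the
 * value and calls CPUMRecalcHyperDRx itself.
 */
#if 0
static int cpumExampleEmulateMovToDr(PVMCPU pVCpu, uint32_t iReg, uint64_t uValue)
{
    /* Writes to DR4/DR5 are aliased to DR6/DR7 by CPUMSetGuestDRx, and the
       guest/DBGF DR7 merge described above happens as a side effect. */
    int rc = CPUMSetGuestDRx(pVCpu, iReg, uValue);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif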
2139/**
2140 * Set the guest XCR0 register.
2141 *
2142 * Will load additional state if the FPU state is already loaded (in ring-0 &
2143 * raw-mode context).
2144 *
2145 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
2146 * value.
2147 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2148 * @param uNewValue The new value.
2149 * @thread EMT(pVCpu)
2150 */
2151VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
2152{
2153 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
2154 /* The X87 bit cannot be cleared. */
2155 && (uNewValue & XSAVE_C_X87)
2156 /* AVX requires SSE. */
2157 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
2158 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
2159 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
2160 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
2161 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
2162 )
2163 {
2164 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
2165
2166 /* If more state components are enabled, we need to take care to load
2167 them if the FPU/SSE state is already loaded. May otherwise leak
2168 host state to the guest. */
2169 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
2170 if (fNewComponents)
2171 {
2172#if defined(IN_RING0) || defined(IN_RC)
2173 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
2174 {
2175 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
2176 /* Adding more components. */
2177 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
2178 else
2179 {
2180 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
2181 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
2182 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
2183 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
2184 }
2185 }
2186#endif
2187 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
2188 }
2189 return VINF_SUCCESS;
2190 }
2191 return VERR_CPUM_RAISE_GP_0;
2192}
2193
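/*
 * Illustrative sketch only (not part of the original file): how an XSETBV
 * emulation path might use CPUMSetGuestXcr0 and the validation rules above.
 * The function name cpumExampleEmulateXSetBv is hypothetical.
 */
#if 0
static int cpumExampleEmulateXSetBv(PVMCPU pVCpu, uint32_t idXcr, uint64_t uValue)
{
    if (idXcr != 0)
        return VERR_CPUM_RAISE_GP_0;            /* Only XCR0 is handled in this sketch. */
    /* E.g. X87|SSE|YMM is accepted, while X87|YMM (AVX without SSE) fails. */
    return CPUMSetGuestXcr0(pVCpu, uValue);
}
#endif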
2194
2195/**
2196 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2197 *
2198 * @returns true if NXE is enabled, otherwise false.
2199 * @param pVCpu The cross context virtual CPU structure.
2200 */
2201VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2202{
2203 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2204}
2205
2206
2207/**
2208 * Tests if the guest has the Page Size Extension enabled (PSE).
2209 *
2210 * @returns true if PSE (or PAE) is enabled, otherwise false.
2211 * @param pVCpu The cross context virtual CPU structure.
2212 */
2213VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2214{
2215 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2216 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2217}
2218
2219
2220/**
2221 * Tests if the guest has paging enabled (PG).
2222 *
2223 * @returns true if paging is enabled, otherwise false.
2224 * @param pVCpu The cross context virtual CPU structure.
2225 */
2226VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2227{
2228 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2229}
2230
2231
2232/**
2233 * Tests if the guest has write protection enabled (CR0.WP).
2234 *
2235 * @returns true if write protection is enabled, otherwise false.
2236 * @param pVCpu The cross context virtual CPU structure.
2237 */
2238VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2239{
2240 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2241}
2242
2243
2244/**
2245 * Tests if the guest is running in real mode or not.
2246 *
2247 * @returns true if in real mode, otherwise false.
2248 * @param pVCpu The cross context virtual CPU structure.
2249 */
2250VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2251{
2252 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2253}
2254
2255
2256/**
2257 * Tests if the guest is running in real or virtual 8086 mode.
2258 *
2259 * @returns @c true if it is, @c false if not.
2260 * @param pVCpu The cross context virtual CPU structure.
2261 */
2262VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2263{
2264 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2265 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2266}
2267
2268
2269/**
2270 * Tests if the guest is running in protected mode or not.
2271 *
2272 * @returns true if in protected mode, otherwise false.
2273 * @param pVCpu The cross context virtual CPU structure.
2274 */
2275VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2276{
2277 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2278}
2279
2280
2281/**
2282 * Tests if the guest is running in paged protected mode or not.
2283 *
2284 * @returns true if in paged protected mode, otherwise false.
2285 * @param pVCpu The cross context virtual CPU structure.
2286 */
2287VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2288{
2289 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2290}
2291
2292
2293/**
2294 * Tests if the guest is running in long mode or not.
2295 *
2296 * @returns true if in long mode, otherwise false.
2297 * @param pVCpu The cross context virtual CPU structure.
2298 */
2299VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2300{
2301 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2302}
2303
2304
2305/**
2306 * Tests if the guest is running in PAE mode or not.
2307 *
2308 * @returns true if in PAE mode, otherwise false.
2309 * @param pVCpu The cross context virtual CPU structure.
2310 */
2311VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2312{
2313 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2314 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2315 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2316 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2317 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2318}
2319
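/*
 * Illustrative sketch only (not part of the original file): combining the
 * predicates above to classify the guest paging mode. The helper name and the
 * returned strings are hypothetical.
 */
#if 0
static const char *cpumExampleGuestPagingModeName(PVMCPU pVCpu)
{
    if (!CPUMIsGuestPagingEnabled(pVCpu))
        return "none";
    if (CPUMIsGuestInLongMode(pVCpu))
        return "long (PAE + LMA)";
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE";
    return "32-bit";
}
#endif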
2320
2321/**
2322 * Tests if the guest is running in 64-bit mode or not.
2323 *
2324 * @returns true if in 64-bit protected mode, otherwise false.
2325 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2326 */
2327VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2328{
2329 if (!CPUMIsGuestInLongMode(pVCpu))
2330 return false;
2331 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2332 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2333}
2334
2335
2336/**
2337 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2338 * registers.
2339 *
2340 * @returns true if in 64-bit protected mode, otherwise false.
2341 * @param pCtx Pointer to the current guest CPU context.
2342 */
2343VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2344{
2345 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2346}
2347
2348#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2349
2350/**
2351 * Tests whether the guest state has been transformed for raw-mode execution.
2352 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2353 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2354 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2355 */
2356VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2357{
2358 return pVCpu->cpum.s.fRawEntered;
2359}
2360
2361/**
2362 * Transforms the guest CPU state to raw-ring mode.
2363 *
2364 * This function will change the cs and ss selector registers with RPL=0 to RPL=1.
2365 *
2366 * @returns VBox status code. (recompiler failure)
2367 * @param pVCpu The cross context virtual CPU structure.
2368 * @see @ref pg_raw
2369 */
2370VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2371{
2372 PVM pVM = pVCpu->CTX_SUFF(pVM);
2373
2374 Assert(!pVCpu->cpum.s.fRawEntered);
2375 Assert(!pVCpu->cpum.s.fRemEntered);
2376 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2377
2378 /*
2379 * Are we in Ring-0?
2380 */
2381 if ( pCtx->ss.Sel
2382 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2383 && !pCtx->eflags.Bits.u1VM)
2384 {
2385 /*
2386 * Enter execution mode.
2387 */
2388 PATMRawEnter(pVM, pCtx);
2389
2390 /*
2391 * Set CPL to Ring-1.
2392 */
2393 pCtx->ss.Sel |= 1;
2394 if ( pCtx->cs.Sel
2395 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2396 pCtx->cs.Sel |= 1;
2397 }
2398 else
2399 {
2400# ifdef VBOX_WITH_RAW_RING1
2401 if ( EMIsRawRing1Enabled(pVM)
2402 && !pCtx->eflags.Bits.u1VM
2403 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2404 {
2405 /* Set CPL to Ring-2. */
2406 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2407 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2408 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2409 }
2410# else
2411 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2412 ("ring-1 code not supported\n"));
2413# endif
2414 /*
2415 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2416 */
2417 PATMRawEnter(pVM, pCtx);
2418 }
2419
2420 /*
2421 * Assert sanity.
2422 */
2423 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2424 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2425 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2426 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2427
2428 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2429
2430 pVCpu->cpum.s.fRawEntered = true;
2431 return VINF_SUCCESS;
2432}
2433
2434
2435/**
2436 * Transforms the guest CPU state from raw-ring mode to correct values.
2437 *
2438 * This function will change any selector registers with RPL=1 back to RPL=0.
2439 *
2440 * @returns Adjusted rc.
2441 * @param pVCpu The cross context virtual CPU structure.
2442 * @param rc Raw mode return code
2443 * @see @ref pg_raw
2444 */
2445VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2446{
2447 PVM pVM = pVCpu->CTX_SUFF(pVM);
2448
2449 /*
2450 * Don't leave if we've already left (in RC).
2451 */
2452 Assert(!pVCpu->cpum.s.fRemEntered);
2453 if (!pVCpu->cpum.s.fRawEntered)
2454 return rc;
2455 pVCpu->cpum.s.fRawEntered = false;
2456
2457 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2458 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2459 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2460 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2461
2462 /*
2463 * Are we executing in raw ring-1?
2464 */
2465 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2466 && !pCtx->eflags.Bits.u1VM)
2467 {
2468 /*
2469 * Leave execution mode.
2470 */
2471 PATMRawLeave(pVM, pCtx, rc);
2472 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2473 /** @todo See what happens if we remove this. */
2474 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2475 pCtx->ds.Sel &= ~X86_SEL_RPL;
2476 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2477 pCtx->es.Sel &= ~X86_SEL_RPL;
2478 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2479 pCtx->fs.Sel &= ~X86_SEL_RPL;
2480 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2481 pCtx->gs.Sel &= ~X86_SEL_RPL;
2482
2483 /*
2484 * Ring-1 selector => Ring-0.
2485 */
2486 pCtx->ss.Sel &= ~X86_SEL_RPL;
2487 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2488 pCtx->cs.Sel &= ~X86_SEL_RPL;
2489 }
2490 else
2491 {
2492 /*
2493 * PATM is taking care of the IOPL and IF flags for us.
2494 */
2495 PATMRawLeave(pVM, pCtx, rc);
2496 if (!pCtx->eflags.Bits.u1VM)
2497 {
2498# ifdef VBOX_WITH_RAW_RING1
2499 if ( EMIsRawRing1Enabled(pVM)
2500 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2501 {
2502 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2503 /** @todo See what happens if we remove this. */
2504 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2505 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2506 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2507 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2508 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2509 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2510 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2511 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2512
2513 /*
2514 * Ring-2 selector => Ring-1.
2515 */
2516 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2517 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2518 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2519 }
2520 else
2521 {
2522# endif
2523 /** @todo See what happens if we remove this. */
2524 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2525 pCtx->ds.Sel &= ~X86_SEL_RPL;
2526 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2527 pCtx->es.Sel &= ~X86_SEL_RPL;
2528 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2529 pCtx->fs.Sel &= ~X86_SEL_RPL;
2530 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2531 pCtx->gs.Sel &= ~X86_SEL_RPL;
2532# ifdef VBOX_WITH_RAW_RING1
2533 }
2534# endif
2535 }
2536 }
2537
2538 return rc;
2539}
2540
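/*
 * Illustrative sketch only (not part of the original file): CPUMRawEnter and
 * CPUMRawLeave are expected to bracket raw-mode execution of guest code; the
 * executor in the middle is left out here. The helper name is hypothetical.
 */
# if 0
static int cpumExampleRunRawMode(PVMCPU pVCpu)
{
    int rc = CPUMRawEnter(pVCpu);   /* RPL 0 -> 1 transformation, PATM enter. */
    /* ... execute guest code in raw-mode here ... */
    rc = CPUMRawLeave(pVCpu, rc);   /* Undo the RPL transformation, adjust rc. */
    return rc;
}
# endif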
2541#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2542
2543/**
2544 * Updates the EFLAGS while we're in raw-mode.
2545 *
2546 * @param pVCpu The cross context virtual CPU structure.
2547 * @param fEfl The new EFLAGS value.
2548 */
2549VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2550{
2551#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2552 if (pVCpu->cpum.s.fRawEntered)
2553 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2554 else
2555#endif
2556 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2557}
2558
2559
2560/**
2561 * Gets the EFLAGS while we're in raw-mode.
2562 *
2563 * @returns The eflags.
2564 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2565 */
2566VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2567{
2568#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2569 if (pVCpu->cpum.s.fRawEntered)
2570 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2571#endif
2572 return pVCpu->cpum.s.Guest.eflags.u32;
2573}
2574
2575
2576/**
2577 * Sets the specified changed flags (CPUM_CHANGED_*).
2578 *
2579 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2580 * @param fChangedAdd The changed flags to add.
2581 */
2582VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2583{
2584 pVCpu->cpum.s.fChanged |= fChangedAdd;
2585}
2586
2587
2588/**
2589 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2590 *
2591 * @returns true if supported.
2592 * @returns false if not supported.
2593 * @param pVM The cross context VM structure.
2594 */
2595VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2596{
2597 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2598}
2599
2600
2601/**
2602 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2603 * @returns true if used.
2604 * @returns false if not used.
2605 * @param pVM The cross context VM structure.
2606 */
2607VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2608{
2609 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2610}
2611
2612
2613/**
2614 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2615 * @returns true if used.
2616 * @returns false if not used.
2617 * @param pVM The cross context VM structure.
2618 */
2619VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2620{
2621 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2622}
2623
2624#ifdef IN_RC
2625
2626/**
2627 * Lazily sync in the FPU/XMM state.
2628 *
2629 * @returns VBox status code.
2630 * @param pVCpu The cross context virtual CPU structure.
2631 */
2632VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2633{
2634 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2635}
2636
2637#endif /* IN_RC */
2638
2639/**
2640 * Checks if we activated the FPU/XMM state of the guest OS.
2641 * @returns true if we did.
2642 * @returns false if not.
2643 * @param pVCpu The cross context virtual CPU structure.
2644 */
2645VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2646{
2647 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2648}
2649
2650
2651/**
2652 * Checks if the guest debug state is active.
2653 *
2654 * @returns boolean
2655 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2656 */
2657VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2658{
2659 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2660}
2661
2662
2663/**
2664 * Checks if the guest debug state is to be made active during the world-switch
2665 * (currently only used for the 32->64 switcher case).
2666 *
2667 * @returns boolean
2668 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2669 */
2670VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2671{
2672 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2673}
2674
2675
2676/**
2677 * Checks if the hyper debug state is active.
2678 *
2679 * @returns boolean
2680 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2681 */
2682VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2683{
2684 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2685}
2686
2687
2688/**
2689 * Checks if the hyper debug state is to be made active during the world-switch
2690 * (currently only used for the 32->64 switcher case).
2691 *
2692 * @returns boolean
2693 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2694 */
2695VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2696{
2697 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2698}
2699
2700
2701/**
2702 * Mark the guest's debug state as inactive.
2703 *
2704 * @returns boolean
2705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2706 * @todo This API doesn't make sense any more.
2707 */
2708VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2709{
2710 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2711 NOREF(pVCpu);
2712}
2713
2714
2715/**
2716 * Get the current privilege level of the guest.
2717 *
2718 * @returns CPL
2719 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2720 */
2721VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2722{
2723 /*
2724 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2725 *
2726 * Note! We used to check CS.DPL here, assuming it was always equal to
2727 * CPL even if a conforming segment was loaded. But this turned out to
2728 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2729 * during install after a far call to ring 2 with VT-x. Then on newer
2730 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2731 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2732 *
2733 * So, forget CS.DPL, always use SS.DPL.
2734 *
2735 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2736 * isn't necessarily equal if the segment is conforming.
2737 * See section 4.11.1 in the AMD manual.
2738 *
2739 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2740 * right after real->prot mode switch and when in V8086 mode? That
2741 * section says the RPL specified in a direct transfer (call, jmp,
2742 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2743 * it would be impossible for an exception handler or the iret
2744 * instruction to figure out whether SS:ESP are part of the frame
2745 * or not. A VBox or qemu bug must've led to this misconception.
2746 *
2747 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2748 * selector into SS with an RPL other than the CPL when CPL != 3 and
2749 * we're in 64-bit mode. The intel dev box doesn't allow this, on
2750 * RPL = CPL. Weird.
2751 */
2752 uint32_t uCpl;
2753 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2754 {
2755 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2756 {
2757 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2758 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2759 else
2760 {
2761 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2762#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2763# ifdef VBOX_WITH_RAW_RING1
2764 if (pVCpu->cpum.s.fRawEntered)
2765 {
2766 if ( uCpl == 2
2767 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2768 uCpl = 1;
2769 else if (uCpl == 1)
2770 uCpl = 0;
2771 }
2772 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2773# else
2774 if (uCpl == 1)
2775 uCpl = 0;
2776# endif
2777#endif
2778 }
2779 }
2780 else
2781 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2782 }
2783 else
2784 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2785 return uCpl;
2786}
2787
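/*
 * Illustrative sketch only (not part of the original file): typical use of
 * CPUMGetGuestCPL, e.g. telling supervisor accesses from user ones. The helper
 * name is hypothetical.
 */
#if 0
static bool cpumExampleIsGuestSupervisor(PVMCPU pVCpu)
{
    return CPUMGetGuestCPL(pVCpu) != 3; /* Rings 0..2 count as supervisor. */
}
#endif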
2788
2789/**
2790 * Gets the current guest CPU mode.
2791 *
2792 * If paging mode is what you need, check out PGMGetGuestMode().
2793 *
2794 * @returns The CPU mode.
2795 * @param pVCpu The cross context virtual CPU structure.
2796 */
2797VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2798{
2799 CPUMMODE enmMode;
2800 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2801 enmMode = CPUMMODE_REAL;
2802 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2803 enmMode = CPUMMODE_PROTECTED;
2804 else
2805 enmMode = CPUMMODE_LONG;
2806
2807 return enmMode;
2808}
2809
2810
2811/**
2812 * Figures out whether the CPU is currently executing 16, 32 or 64-bit code.
2813 *
2814 * @returns 16, 32 or 64.
2815 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2816 */
2817VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2818{
2819 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2820 return 16;
2821
2822 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2823 {
2824 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2825 return 16;
2826 }
2827
2828 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2829 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2830 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2831 return 64;
2832
2833 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2834 return 32;
2835
2836 return 16;
2837}
2838
2839
2840VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2841{
2842 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2843 return DISCPUMODE_16BIT;
2844
2845 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2846 {
2847 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2848 return DISCPUMODE_16BIT;
2849 }
2850
2851 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2852 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2853 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2854 return DISCPUMODE_64BIT;
2855
2856 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2857 return DISCPUMODE_32BIT;
2858
2859 return DISCPUMODE_16BIT;
2860}
2861