VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@49391

Last change on this file since 49391 was 49360, checked in by vboxsync, 11 years ago

Stopgap MSR fixes (a better MSR implementation is underway, just very tedious work), getter as well.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 102.5 KB
1/* $Id: CPUMAllRegs.cpp 49360 2013-11-01 12:08:56Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
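/*
 * Worked example of the real/V8086-mode paths above (illustration only, not
 * additional code): a selector value of 0x1234 yields
 *
 *      pSReg->u64Base  = (uint32_t)0x1234 << 4;    == 0x00012340
 *      pSReg->u32Limit = 0x0000ffff;               (V8086 mode only)
 *
 * i.e. the classic real-mode "segment * 16" linear base, with the limit
 * forced to 64 KiB only in the tightly controlled V8086 case.
 */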
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
 150 * Loads the hidden parts of a selector register.
 151 *
 152 * @param pVCpu The current virtual CPU.
 * @param pSReg The selector register to lazily load the hidden parts of.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
 168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
 282 * All general registers not given as a parameter will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
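/*
 * Note on the EFLAGS value above: X86_EFL_1 is just the always-one reserved
 * bit (bit 1), so the resulting EFLAGS has IF=0 and IOPL=0 -- interrupts
 * disabled and I/O privilege level 0, exactly the "sane values for C/C++
 * code execution" promised in the function documentation.
 */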
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
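/*
 * A note on the ring-0 branch above for 32-bit hosts with 64-bit guest
 * support (illustration, not additional code): when the guest is in long
 * mode, a call such as
 *
 *      CPUMSetHyperDR0(pVCpu, uDr0);
 *
 * does not invoke ASMSetDR0() directly; it merely records the value and sets
 * CPUM_SYNC_DEBUG_REGS_HYPER, presumably leaving the actual load to the
 * world-switch/HM code running in a context where it is safe to do so.
 */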
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MT are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
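/*
 * Example of the TLB-flush bookkeeping above (illustration only): a guest
 * write that toggles CR0.PG, CR0.WP or CR0.PE -- e.g. clearing PG when
 * dropping back to real mode -- sets CPUM_CHANGED_GLOBAL_TLB_FLUSH in
 * addition to CPUM_CHANGED_CR0, while a write that only toggles TS/EM/MP
 * does not. The caller still has to inform PGM itself; this function only
 * records the change (for REM and friends).
 */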
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873/**
874 * Worker for CPUMQueryGuestMsr().
875 *
876 * @retval VINF_SUCCESS
877 * @retval VERR_CPUM_RAISE_GP_0
878 * @param pVCpu The cross context CPU structure.
879 * @param idMsr The MSR to read.
880 * @param puValue Where to store the return value.
881 */
882static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
883{
884 /*
885 * If we don't indicate MSR support in the CPUID feature bits, indicate
886 * that a #GP(0) should be raised.
887 */
888 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
889 {
890 *puValue = 0;
891 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
892 }
893
894 int rc = VINF_SUCCESS;
895 uint8_t const u8Multiplier = 4;
896 switch (idMsr)
897 {
898 case MSR_IA32_TSC:
899 *puValue = TMCpuTickGet(pVCpu);
900 break;
901
902 case MSR_IA32_APICBASE:
903 {
904 PVM pVM = pVCpu->CTX_SUFF(pVM);
905 if ( ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* APIC Std feature */
906 && (pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_APIC))
907 || ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 /* APIC Ext feature (AMD) */
908 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD
909 && (pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_APIC))
910 || ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* x2APIC */
911 && (pVM->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_X2APIC)))
912 {
913 *puValue = pVCpu->cpum.s.Guest.msrApicBase;
914 }
915 else
916 {
917 *puValue = 0;
918 rc = VERR_CPUM_RAISE_GP_0;
919 }
920 break;
921 }
922
923 case MSR_IA32_CR_PAT:
924 *puValue = pVCpu->cpum.s.Guest.msrPAT;
925 break;
926
927 case MSR_IA32_SYSENTER_CS:
928 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
929 break;
930
931 case MSR_IA32_SYSENTER_EIP:
932 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
933 break;
934
935 case MSR_IA32_SYSENTER_ESP:
936 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
937 break;
938
939 case MSR_IA32_MTRR_CAP:
940 {
941 /* This is currently a bit weird. :-) */
942 uint8_t const cVariableRangeRegs = 0;
943 bool const fSystemManagementRangeRegisters = false;
944 bool const fFixedRangeRegisters = false;
945 bool const fWriteCombiningType = false;
946 *puValue = cVariableRangeRegs
947 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
948 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
949 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
950 break;
951 }
952
953 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
954 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
955 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
956 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
957 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
958 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
959 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
960 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
961 /** @todo implement variable MTRRs. */
962 *puValue = 0;
963 break;
964#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
965 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
966 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
967 *puValue = 0;
968 break;
969#endif
970
971 case MSR_IA32_MTRR_DEF_TYPE:
972 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
973 break;
974
975 case IA32_MTRR_FIX64K_00000:
976 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
977 break;
978 case IA32_MTRR_FIX16K_80000:
979 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
980 break;
981 case IA32_MTRR_FIX16K_A0000:
982 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
983 break;
984 case IA32_MTRR_FIX4K_C0000:
985 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
986 break;
987 case IA32_MTRR_FIX4K_C8000:
988 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
989 break;
990 case IA32_MTRR_FIX4K_D0000:
991 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
992 break;
993 case IA32_MTRR_FIX4K_D8000:
994 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
995 break;
996 case IA32_MTRR_FIX4K_E0000:
997 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
998 break;
999 case IA32_MTRR_FIX4K_E8000:
1000 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
1001 break;
1002 case IA32_MTRR_FIX4K_F0000:
1003 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
1004 break;
1005 case IA32_MTRR_FIX4K_F8000:
1006 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
1007 break;
1008
1009 case MSR_K6_EFER:
1010 *puValue = pVCpu->cpum.s.Guest.msrEFER;
1011 break;
1012
1013 case MSR_K8_SF_MASK:
1014 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
1015 break;
1016
1017 case MSR_K6_STAR:
1018 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
1019 break;
1020
1021 case MSR_K8_LSTAR:
1022 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
1023 break;
1024
1025 case MSR_K8_CSTAR:
1026 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
1027 break;
1028
1029 case MSR_K8_FS_BASE:
1030 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
1031 break;
1032
1033 case MSR_K8_GS_BASE:
1034 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
1035 break;
1036
1037 case MSR_K8_KERNEL_GS_BASE:
1038 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
1039 break;
1040
1041 case MSR_K8_TSC_AUX:
1042 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
1043 break;
1044
1045 case MSR_IA32_PERF_STATUS:
 1046 /** @todo This may not be exactly correct; maybe use the host's values instead.
 1047 * Apple code indicates that we should use CPU Hz / 1.333MHz here. */
 1048 /** @todo Where can the specs implemented here be found? */
1049 *puValue = UINT64_C(1000) /* TSC increment by tick */
1050 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
1051 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
1052 break;
1053
1054 case MSR_IA32_FSB_CLOCK_STS:
1055 /*
1056 * Encoded as:
1057 * 0 - 266
1058 * 1 - 133
1059 * 2 - 200
 1060 * 3 - 166
 1061 * 5 - 100
1062 */
1063 *puValue = (2 << 4);
1064 break;
1065
1066 case MSR_IA32_PLATFORM_INFO:
1067 *puValue = ((uint32_t)u8Multiplier << 8) /* Flex ratio max */
1068 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
1069 break;
1070
1071 case MSR_IA32_THERM_STATUS:
 1072 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
1073 *puValue = RT_BIT(31) /* validity bit */
1074 | (UINT64_C(20) << 16) /* degrees till TCC */;
1075 break;
1076
1077 case MSR_IA32_MISC_ENABLE:
1078#if 0
1079 /* Needs to be tested more before enabling. */
1080 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
1081#else
 1082 /* Currently we don't allow the guest to modify these enable MSRs. */
1083 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
1084
1085 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
1086
1087 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
 1088 /** @todo Add more CPUID-controlled features this way. */
1089#endif
1090 break;
1091
1092 /** @todo virtualize DEBUGCTL and relatives */
1093 case MSR_IA32_DEBUGCTL:
1094 *puValue = 0;
1095 break;
1096
1097#if 0 /*def IN_RING0 */
1098 case MSR_IA32_PLATFORM_ID:
1099 case MSR_IA32_BIOS_SIGN_ID:
1100 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
1101 {
1102 /* Available since the P6 family. VT-x implies that this feature is present. */
1103 if (idMsr == MSR_IA32_PLATFORM_ID)
1104 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
1105 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
1106 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
1107 break;
1108 }
1109 /* no break */
1110#endif
1111 /*
 1112 * The BIOS_SIGN_ID MSR and MSR_IA32_MCG_CAP et al exist on AMD64 as
 1113 * well; at least Bulldozer has them. Windows 7 queries them.
1114 * XP has been observed querying MSR_IA32_MC0_CTL.
1115 * XP64 has been observed querying MSR_P4_LASTBRANCH_0 (also on AMD).
1116 */
1117 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1118 case MSR_IA32_MCG_CAP: /* fam/mod >= 6_01 */
1119 case MSR_IA32_MCG_STATUS: /* indicated as not present in CAP */
1120 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1121 case MSR_IA32_MC0_CTL:
1122 case MSR_IA32_MC0_STATUS:
1123 case MSR_P4_LASTBRANCH_0:
1124 case MSR_P4_LASTBRANCH_1:
1125 case MSR_P4_LASTBRANCH_2:
1126 case MSR_P4_LASTBRANCH_3:
1127 *puValue = 0;
1128 break;
1129
1130
1131 /*
 1132 * Intel-specific MSRs:
1133 */
1134 case MSR_P5_MC_ADDR:
1135 case MSR_P5_MC_TYPE:
1136 case MSR_P4_LASTBRANCH_TOS: /** @todo Are these branch regs still here on more recent CPUs? The documentation doesn't mention them for several archs. */
 1137 case MSR_IA32_PERFEVTSEL0: /* NetWare 6.5 wants these four. (Bet on AMD as well.) */
1138 case MSR_IA32_PERFEVTSEL1:
1139 case MSR_IA32_PMC0:
1140 case MSR_IA32_PMC1:
1141 case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */
1142 case MSR_IA32_MPERF: /* intel_pstate depends on this but does a validation test */
1143 case MSR_IA32_APERF: /* intel_pstate depends on this but does a validation test */
1144 /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
1145 case MSR_RAPL_POWER_UNIT:
1146 case MSR_BBL_CR_CTL3: /* ca. core arch? */
 1147 case MSR_PKG_CST_CONFIG_CONTROL: /* Nehalem, Sandy Bridge */
1148 case MSR_CORE_THREAD_COUNT: /* Apple queries this. */
1149 case MSR_FLEX_RATIO: /* Apple queries this. */
1150 *puValue = 0;
1151 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1152 {
1153 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1154 rc = VERR_CPUM_RAISE_GP_0;
1155 break;
1156 }
1157
 1158 /* Provide more plausible values for some of them. */
1159 switch (idMsr)
1160 {
1161 case MSR_RAPL_POWER_UNIT:
1162 *puValue = RT_MAKE_U32_FROM_U8(3 /* power units (1/8 W)*/,
1163 16 /* 15.3 micro-Joules */,
1164 10 /* 976 microseconds increments */,
1165 0);
1166 break;
1167 case MSR_BBL_CR_CTL3:
1168 *puValue = RT_MAKE_U32_FROM_U8(1, /* bit 0 - L2 Hardware Enabled. (RO) */
1169 1, /* bit 8 - L2 Enabled (R/W). */
1170 0, /* bit 23 - L2 Not Present (RO). */
1171 0);
1172 break;
1173 case MSR_PKG_CST_CONFIG_CONTROL:
1174 *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl;
1175 break;
1176 case MSR_CORE_THREAD_COUNT:
1177 {
1178 /** @todo restrict this to nehalem. */
1179 PVM pVM = pVCpu->CTX_SUFF(pVM); /* Note! Not sweating the 4-bit core count limit on westmere. */
1180 *puValue = (pVM->cCpus & 0xffff) | ((pVM->cCpus & 0xffff) << 16);
1181 break;
1182 }
1183
1184 case MSR_FLEX_RATIO:
1185 {
1186 /** @todo Check for P4, it's different there. Try find accurate specs. */
1187 *puValue = (uint32_t)u8Multiplier << 8;
1188 break;
1189 }
1190 }
1191 break;
1192
1193#if 0 /* Only on pentium CPUs! */
1194 /* Event counters, not supported. */
1195 case MSR_IA32_CESR:
1196 case MSR_IA32_CTR0:
1197 case MSR_IA32_CTR1:
1198 *puValue = 0;
1199 break;
1200#endif
1201
1202
1203 /*
1204 * AMD specific MSRs:
1205 */
1206 case MSR_K8_SYSCFG:
1207 case MSR_K8_INT_PENDING:
1208 case MSR_K8_NB_CFG: /* (All known values are 0 on reset.) */
1209 case MSR_K8_HWCR: /* Very interesting bits here. :) */
1210 case MSR_K8_VM_CR: /* Windows 8 */
 1211 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1212 case 0xc0010042: /* quick fix for something. */
1213 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1214 case 0xc0011004: /* quick fix for the opposition. */
1215 case 0xc0011005: /* quick fix for the opposition. */
1216 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1217 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1218 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1219 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1220 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1221 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1222 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1223 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1224 *puValue = 0;
1225 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1226 {
 1227 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1228 return VERR_CPUM_RAISE_GP_0;
1229 }
1230 /* ignored */
1231 break;
1232
1233 default:
1234 /*
1235 * Hand the X2APIC range to PDM and the APIC.
1236 */
1237 if ( idMsr >= MSR_IA32_X2APIC_START
1238 && idMsr <= MSR_IA32_X2APIC_END)
1239 {
1240 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
1241 if (RT_SUCCESS(rc))
1242 rc = VINF_SUCCESS;
1243 else
1244 {
1245 *puValue = 0;
1246 rc = VERR_CPUM_RAISE_GP_0;
1247 }
1248 }
1249 else
1250 {
1251 *puValue = 0;
1252 rc = VERR_CPUM_RAISE_GP_0;
1253 }
1254 break;
1255 }
1256
1257 return rc;
1258}
1259
1260
1261/**
1262 * Query an MSR.
1263 *
1264 * The caller is responsible for checking privilege if the call is the result
1265 * of a RDMSR instruction. We'll do the rest.
1266 *
1267 * @retval VINF_SUCCESS on success.
1268 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
1269 * expected to take the appropriate actions. @a *puValue is set to 0.
1270 * @param pVCpu Pointer to the VMCPU.
1271 * @param idMsr The MSR.
1272 * @param puValue Where to return the value.
1273 *
1274 * @remarks This will always return the right values, even when we're in the
1275 * recompiler.
1276 */
1277VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
1278{
1279 int rc = cpumQueryGuestMsrInt(pVCpu, idMsr, puValue);
1280 LogFlow(("CPUMQueryGuestMsr: %#x -> %llx rc=%d\n", idMsr, *puValue, rc));
1281 return rc;
1282}
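/*
 * Typical caller pattern (hypothetical RDMSR-handler sketch; the context and
 * variable names below are illustrative, not VMM API):
 *
 *      uint64_t uValue;
 *      int rc = CPUMQueryGuestMsr(pVCpu, uMsrFromEcx, &uValue);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          pCtx->rax = (uint32_t)uValue;            // RDMSR returns EDX:EAX
 *          pCtx->rdx = (uint32_t)(uValue >> 32);
 *      }
 *      else                                         // VERR_CPUM_RAISE_GP_0
 *      {
 *          // inject #GP(0) into the guest instead of updating EDX:EAX
 *      }
 *
 * The privilege (CPL) check is the caller's job, as noted above.
 */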
1283
1284
1285/**
1286 * Sets the MSR.
1287 *
1288 * The caller is responsible for checking privilege if the call is the result
1289 * of a WRMSR instruction. We'll do the rest.
1290 *
1291 * @retval VINF_SUCCESS on success.
1292 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
1293 * appropriate actions.
1294 *
1295 * @param pVCpu Pointer to the VMCPU.
1296 * @param idMsr The MSR id.
1297 * @param uValue The value to set.
1298 *
1299 * @remarks Everyone changing MSR values, including the recompiler, shall do it
1300 * by calling this method. This makes sure we have current values and
1301 * that we trigger all the right actions when something changes.
1302 */
1303VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
1304{
1305 LogFlow(("CPUMSetGuestMsr: %#x <- %#llx\n", idMsr, uValue));
1306
1307 /*
1308 * If we don't indicate MSR support in the CPUID feature bits, indicate
1309 * that a #GP(0) should be raised.
1310 */
1311 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
1312 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
1313
1314 int rc = VINF_SUCCESS;
1315 switch (idMsr)
1316 {
1317 case MSR_IA32_MISC_ENABLE:
1318 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1319 break;
1320
1321 case MSR_IA32_TSC:
1322 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1323 break;
1324
1325 case MSR_IA32_APICBASE:
1326 rc = PDMApicSetBase(pVCpu, uValue);
1327 if (rc != VINF_SUCCESS)
1328 rc = VERR_CPUM_RAISE_GP_0;
1329 break;
1330
1331 case MSR_IA32_CR_PAT:
1332 pVCpu->cpum.s.Guest.msrPAT = uValue;
1333 break;
1334
1335 case MSR_IA32_SYSENTER_CS:
1336 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
1337 break;
1338
1339 case MSR_IA32_SYSENTER_EIP:
1340 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1341 break;
1342
1343 case MSR_IA32_SYSENTER_ESP:
1344 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1345 break;
1346
1347 case MSR_IA32_MTRR_CAP:
1348 return VERR_CPUM_RAISE_GP_0;
1349
1350 case MSR_IA32_MTRR_DEF_TYPE:
1351 if ( (uValue & UINT64_C(0xfffffffffffff300))
1352 || ( (uValue & 0xff) != 0
1353 && (uValue & 0xff) != 1
1354 && (uValue & 0xff) != 4
1355 && (uValue & 0xff) != 5
1356 && (uValue & 0xff) != 6) )
1357 {
1358 Log(("CPUM: MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1359 return VERR_CPUM_RAISE_GP_0;
1360 }
1361 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1362 break;
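 /*
 * (The accepted low-byte values above are the architectural memory types:
 * 0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB; 2, 3 and 7+ are reserved and
 * therefore rejected with #GP.)
 */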
1363
1364 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
1365 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
1366 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
1367 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
1368 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
1369 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
1370 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
1371 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
1372 /** @todo implement variable MTRRs. */
1373 break;
1374#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
1375 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
1376 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
1377 break;
1378#endif
1379
1380 case IA32_MTRR_FIX64K_00000:
1381 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1382 break;
1383 case IA32_MTRR_FIX16K_80000:
1384 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1385 break;
1386 case IA32_MTRR_FIX16K_A0000:
1387 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1388 break;
1389 case IA32_MTRR_FIX4K_C0000:
1390 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1391 break;
1392 case IA32_MTRR_FIX4K_C8000:
1393 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1394 break;
1395 case IA32_MTRR_FIX4K_D0000:
1396 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1397 break;
1398 case IA32_MTRR_FIX4K_D8000:
1399 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1400 break;
1401 case IA32_MTRR_FIX4K_E0000:
1402 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1403 break;
1404 case IA32_MTRR_FIX4K_E8000:
1405 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1406 break;
1407 case IA32_MTRR_FIX4K_F0000:
1408 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1409 break;
1410 case IA32_MTRR_FIX4K_F8000:
1411 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1412 break;
1413
1414 /*
1415 * AMD64 MSRs.
1416 */
1417 case MSR_K6_EFER:
1418 {
1419 PVM pVM = pVCpu->CTX_SUFF(pVM);
1420 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1421 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1422 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1423 : 0;
1424 uint64_t fMask = 0;
1425
1426 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
1427 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1428 fMask |= MSR_K6_EFER_NXE;
1429 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1430 fMask |= MSR_K6_EFER_LME;
1431 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1432 fMask |= MSR_K6_EFER_SCE;
1433 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1434 fMask |= MSR_K6_EFER_FFXSR;
1435
1436 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1437 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1438 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1439 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1440 {
1441 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1442 return VERR_CPUM_RAISE_GP_0;
1443 }
1444
1445 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1446 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1447 ("Unexpected value %RX64\n", uValue));
1448 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1449
1450 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1451 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1452 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1453 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1454 {
1455 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1456 HMFlushTLB(pVCpu);
1457
1458 /* Notify PGM about NXE changes. */
1459 if ( (uOldEFER & MSR_K6_EFER_NXE)
1460 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1461 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1462 }
1463 break;
1464 }
1465
1466 case MSR_K8_SF_MASK:
1467 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1468 break;
1469
1470 case MSR_K6_STAR:
1471 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1472 break;
1473
1474 case MSR_K8_LSTAR:
1475 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1476 break;
1477
1478 case MSR_K8_CSTAR:
1479 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1480 break;
1481
1482 case MSR_K8_FS_BASE:
1483 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1484 break;
1485
1486 case MSR_K8_GS_BASE:
1487 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1488 break;
1489
1490 case MSR_K8_KERNEL_GS_BASE:
1491 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1492 break;
1493
1494 case MSR_K8_TSC_AUX:
1495 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1496 break;
1497
1498 case MSR_IA32_DEBUGCTL:
1499 /** @todo virtualize DEBUGCTL and relatives */
1500 break;
1501
1502 /*
1503 * Intel specifics MSRs:
1504 */
1505 /*case MSR_IA32_PLATFORM_ID: - read-only */
1506 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1507 case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */
 1508 /*case MSR_IA32_MCG_CAP: - read-only */
1509 /*case MSR_IA32_MCG_STATUS: - read-only */
1510 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1511 /*case MSR_IA32_MC0_CTL: - read-only? */
1512 /*case MSR_IA32_MC0_STATUS: - read-only? */
1513 case MSR_PKG_CST_CONFIG_CONTROL:
1514 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1515 {
1516 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1517 return VERR_CPUM_RAISE_GP_0;
1518 }
1519
1520 switch (idMsr)
1521 {
1522 case MSR_PKG_CST_CONFIG_CONTROL:
1523 {
1524 if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15))
1525 {
1526 Log(("MSR_PKG_CST_CONFIG_CONTROL: Write protected -> #GP\n"));
1527 return VERR_CPUM_RAISE_GP_0;
1528 }
1529 static uint64_t s_fMask = UINT64_C(0x01f08407); /** @todo Only Nehalem has 24; Only Sandy has 27 and 28. */
1530 static uint64_t s_fGpInvalid = UINT64_C(0xffffffff00ff0000); /** @todo figure out exactly what's off limits. */
1531 if ((uValue & s_fGpInvalid) || (uValue & 7) >= 5)
1532 {
1533 Log(("MSR_PKG_CST_CONFIG_CONTROL: Invalid value %#llx -> #GP\n", uValue));
1534 return VERR_CPUM_RAISE_GP_0;
1535 }
1536 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue & s_fMask;
1537 break;
1538 }
1539
1540 }
1541 /* ignored */
1542 break;
1543
1544 /*
1545 * AMD specific MSRs:
1546 */
1547 case MSR_K8_SYSCFG: /** @todo can be written, but we ignore that for now. */
1548 case MSR_K8_INT_PENDING: /** @todo can be written, but we ignore that for now. */
1549 case MSR_K8_NB_CFG: /** @todo can be written; the apicid swapping might be used and would need saving, but probably unnecessary. */
 1550 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1551 case 0xc0010042: /* quick fix for something. */
1552 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1553 case 0xc0011004: /* quick fix for the opposition. */
1554 case 0xc0011005: /* quick fix for the opposition. */
1555 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1556 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1557 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1558 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1559 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1560 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1561 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1562 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1563 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1564 {
 1565 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1566 return VERR_CPUM_RAISE_GP_0;
1567 }
1568 /* ignored */
1569 break;
1570
1571
1572 default:
1573 /*
1574 * Hand the X2APIC range to PDM and the APIC.
1575 */
1576 if ( idMsr >= MSR_IA32_X2APIC_START
1577 && idMsr <= MSR_IA32_X2APIC_END)
1578 {
1579 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1580 if (rc != VINF_SUCCESS)
1581 rc = VERR_CPUM_RAISE_GP_0;
1582 }
1583 else
1584 {
1585 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1586 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1587 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1588 }
1589 break;
1590 }
1591 return rc;
1592}
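/*
 * The WRMSR side mirrors CPUMQueryGuestMsr (hypothetical caller sketch,
 * names are illustrative):
 *
 *      uint64_t uValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);   // EDX:EAX
 *      if (CPUMSetGuestMsr(pVCpu, uMsrFromEcx, uValue) != VINF_SUCCESS)
 *      {
 *          // inject #GP(0) into the guest
 *      }
 *
 * Note how e.g. an EFER write that toggles LME while CR0.PG is set comes
 * back as VERR_CPUM_RAISE_GP_0 rather than being applied (see the EFER case
 * above).
 */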
1593
1594
1595VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1596{
1597 if (pcbLimit)
1598 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1599 return pVCpu->cpum.s.Guest.idtr.pIdt;
1600}
1601
1602
1603VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1604{
1605 if (pHidden)
1606 *pHidden = pVCpu->cpum.s.Guest.tr;
1607 return pVCpu->cpum.s.Guest.tr.Sel;
1608}
1609
1610
1611VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1612{
1613 return pVCpu->cpum.s.Guest.cs.Sel;
1614}
1615
1616
1617VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1618{
1619 return pVCpu->cpum.s.Guest.ds.Sel;
1620}
1621
1622
1623VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1624{
1625 return pVCpu->cpum.s.Guest.es.Sel;
1626}
1627
1628
1629VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1630{
1631 return pVCpu->cpum.s.Guest.fs.Sel;
1632}
1633
1634
1635VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1636{
1637 return pVCpu->cpum.s.Guest.gs.Sel;
1638}
1639
1640
1641VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1642{
1643 return pVCpu->cpum.s.Guest.ss.Sel;
1644}
1645
1646
1647VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1648{
1649 return pVCpu->cpum.s.Guest.ldtr.Sel;
1650}
1651
1652
1653VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
1654{
1655 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1656 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1657 return pVCpu->cpum.s.Guest.ldtr.Sel;
1658}
1659
1660
1661VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1662{
1663 return pVCpu->cpum.s.Guest.cr0;
1664}
1665
1666
1667VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1668{
1669 return pVCpu->cpum.s.Guest.cr2;
1670}
1671
1672
1673VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1674{
1675 return pVCpu->cpum.s.Guest.cr3;
1676}
1677
1678
1679VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1680{
1681 return pVCpu->cpum.s.Guest.cr4;
1682}
1683
1684
1685VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1686{
1687 uint64_t u64;
1688 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1689 if (RT_FAILURE(rc))
1690 u64 = 0;
1691 return u64;
1692}
1693
1694
1695VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1696{
1697 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1698}
1699
1700
1701VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1702{
1703 return pVCpu->cpum.s.Guest.eip;
1704}
1705
1706
1707VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1708{
1709 return pVCpu->cpum.s.Guest.rip;
1710}
1711
1712
1713VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1714{
1715 return pVCpu->cpum.s.Guest.eax;
1716}
1717
1718
1719VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1720{
1721 return pVCpu->cpum.s.Guest.ebx;
1722}
1723
1724
1725VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1726{
1727 return pVCpu->cpum.s.Guest.ecx;
1728}
1729
1730
1731VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1732{
1733 return pVCpu->cpum.s.Guest.edx;
1734}
1735
1736
1737VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1738{
1739 return pVCpu->cpum.s.Guest.esi;
1740}
1741
1742
1743VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1744{
1745 return pVCpu->cpum.s.Guest.edi;
1746}
1747
1748
1749VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1750{
1751 return pVCpu->cpum.s.Guest.esp;
1752}
1753
1754
1755VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1756{
1757 return pVCpu->cpum.s.Guest.ebp;
1758}
1759
1760
1761VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1762{
1763 return pVCpu->cpum.s.Guest.eflags.u32;
1764}
1765
1766
1767VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1768{
1769 switch (iReg)
1770 {
1771 case DISCREG_CR0:
1772 *pValue = pVCpu->cpum.s.Guest.cr0;
1773 break;
1774
1775 case DISCREG_CR2:
1776 *pValue = pVCpu->cpum.s.Guest.cr2;
1777 break;
1778
1779 case DISCREG_CR3:
1780 *pValue = pVCpu->cpum.s.Guest.cr3;
1781 break;
1782
1783 case DISCREG_CR4:
1784 *pValue = pVCpu->cpum.s.Guest.cr4;
1785 break;
1786
1787 case DISCREG_CR8:
1788 {
1789 uint8_t u8Tpr;
1790 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1791 if (RT_FAILURE(rc))
1792 {
1793 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1794 *pValue = 0;
1795 return rc;
1796 }
 1797 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are ignored. */
1798 break;
1799 }
1800
1801 default:
1802 return VERR_INVALID_PARAMETER;
1803 }
1804 return VINF_SUCCESS;
1805}
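/*
 * Example of the CR8/TPR mapping above (illustration only): the local APIC
 * TPR keeps the priority class in bits 7-4 while CR8 holds it in bits 3-0,
 * so a TPR of 0x80 reads back as CR8 = 8 and a TPR of 0x00 as CR8 = 0.
 */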
1806
1807
1808VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1809{
1810 return pVCpu->cpum.s.Guest.dr[0];
1811}
1812
1813
1814VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1815{
1816 return pVCpu->cpum.s.Guest.dr[1];
1817}
1818
1819
1820VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1821{
1822 return pVCpu->cpum.s.Guest.dr[2];
1823}
1824
1825
1826VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1827{
1828 return pVCpu->cpum.s.Guest.dr[3];
1829}
1830
1831
1832VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1833{
1834 return pVCpu->cpum.s.Guest.dr[6];
1835}
1836
1837
1838VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1839{
1840 return pVCpu->cpum.s.Guest.dr[7];
1841}
1842
1843
1844VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1845{
1846 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1847 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1848 if (iReg == 4 || iReg == 5)
1849 iReg += 2;
1850 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1851 return VINF_SUCCESS;
1852}
1853
1854
1855VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1856{
1857 return pVCpu->cpum.s.Guest.msrEFER;
1858}
1859
1860
1861/**
1862 * Gets a CPUID leaf.
1863 *
1864 * @param pVCpu Pointer to the VMCPU.
1865 * @param iLeaf The CPUID leaf to get.
1866 * @param pEax Where to store the EAX value.
1867 * @param pEbx Where to store the EBX value.
1868 * @param pEcx Where to store the ECX value.
1869 * @param pEdx Where to store the EDX value.
1870 */
1871VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1872{
1873 PVM pVM = pVCpu->CTX_SUFF(pVM);
1874
1875 PCCPUMCPUID pCpuId;
1876 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1877 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1878 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1879 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1880 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1881 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1882 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1883 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1884 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1885 else
1886 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1887
1888 uint32_t cCurrentCacheIndex = *pEcx;
1889
1890 *pEax = pCpuId->eax;
1891 *pEbx = pCpuId->ebx;
1892 *pEcx = pCpuId->ecx;
1893 *pEdx = pCpuId->edx;
1894
1895 if ( iLeaf == 1)
1896 {
1897 /* Bits 31-24: Initial APIC ID */
1898 Assert(pVCpu->idCpu <= 255);
1899 *pEbx |= (pVCpu->idCpu << 24);
1900 }
1901
1902 if ( iLeaf == 4
1903 && cCurrentCacheIndex < 3
1904 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1905 {
1906 uint32_t type, level, sharing, linesize,
1907 partitions, associativity, sets, cores;
1908
1909 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1910 partitions = 1;
 1911 /* These are only to quiet the compiler; they will always
 1912 get overwritten, and the compiler should be able to figure that out. */
1913 sets = associativity = sharing = level = 1;
1914 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1915 switch (cCurrentCacheIndex)
1916 {
1917 case 0:
1918 type = 1;
1919 level = 1;
1920 sharing = 1;
1921 linesize = 64;
1922 associativity = 8;
1923 sets = 64;
1924 break;
1925 case 1:
1926 level = 1;
1927 type = 2;
1928 sharing = 1;
1929 linesize = 64;
1930 associativity = 8;
1931 sets = 64;
1932 break;
1933 default: /* shut up gcc.*/
1934 AssertFailed();
1935 case 2:
1936 level = 2;
1937 type = 3;
1938 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1939 linesize = 64;
1940 associativity = 24;
1941 sets = 4096;
1942 break;
1943 }
1944
1945 *pEax |= ((cores - 1) << 26) |
1946 ((sharing - 1) << 14) |
1947 (level << 5) |
1948 1;
1949 *pEbx = (linesize - 1) |
1950 ((partitions - 1) << 12) |
1951 ((associativity - 1) << 22); /* -1 encoding */
1952 *pEcx = sets - 1;
1953 }
1954
1955 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1956}
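/*
 * Worked example of the leaf 4 encoding above (illustration only): for cache
 * index 2 (the unified L2 modelled as shared by all cores) the "-1 encoded"
 * EBX/ECX fields describe linesize=64, partitions=1, associativity=24 and
 * sets=4096, which decodes -- as on a real CPU -- to a cache size of
 *
 *      64 * 1 * 24 * 4096 = 6291456 bytes (6 MB).
 */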
1957
1958/**
1959 * Gets a number of standard CPUID leafs.
1960 *
1961 * @returns Number of leafs.
1962 * @param pVM Pointer to the VM.
1963 * @remark Intended for PATM.
1964 */
1965VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1966{
1967 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1968}
1969
1970
1971/**
1972 * Gets a number of extended CPUID leafs.
1973 *
1974 * @returns Number of leafs.
1975 * @param pVM Pointer to the VM.
1976 * @remark Intended for PATM.
1977 */
1978VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1979{
1980 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1981}
1982
1983
1984/**
1985 * Gets a number of centaur CPUID leafs.
1986 *
1987 * @returns Number of leafs.
1988 * @param pVM Pointer to the VM.
1989 * @remark Intended for PATM.
1990 */
1991VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1992{
1993 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1994}
1995
1996
1997/**
1998 * Sets a CPUID feature bit.
1999 *
2000 * @param pVM Pointer to the VM.
2001 * @param enmFeature The feature to set.
2002 */
2003VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2004{
2005 switch (enmFeature)
2006 {
2007 /*
2008 * Set the APIC bit in both feature masks.
2009 */
2010 case CPUMCPUIDFEATURE_APIC:
2011 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2012 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
2013 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2014 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2015 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
2016 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
2017 break;
2018
2019 /*
2020 * Set the x2APIC bit in the standard feature mask.
2021 */
2022 case CPUMCPUIDFEATURE_X2APIC:
2023 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2024 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
2025 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
2026 break;
2027
2028 /*
2029 * Set the sysenter/sysexit bit in the standard feature mask.
2030 * Assumes the caller knows what it's doing! (host must support these)
2031 */
2032 case CPUMCPUIDFEATURE_SEP:
2033 {
2034 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
2035 {
2036 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
2037 return;
2038 }
2039
2040 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2041 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
2042 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
2043 break;
2044 }
2045
2046 /*
2047 * Set the syscall/sysret bit in the extended feature mask.
2048 * Assumes the caller knows what it's doing! (host must support these)
2049 */
2050 case CPUMCPUIDFEATURE_SYSCALL:
2051 {
2052 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2053 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2054 {
2055#if HC_ARCH_BITS == 32
 2056 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL does not seem to be set in 32-bit mode,
 2057 * even when the CPU is capable of it in 64-bit mode.
 2058 */
2059 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2060 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
2061 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2062#endif
2063 {
2064 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
2065 return;
2066 }
2067 }
2068 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
2069 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
2070 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
2071 break;
2072 }
2073
2074 /*
2075 * Set the PAE bit in both feature masks.
2076 * Assumes the caller knows what it's doing! (host must support these)
2077 */
2078 case CPUMCPUIDFEATURE_PAE:
2079 {
2080 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
2081 {
2082 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
2083 return;
2084 }
2085
2086 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2087 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
2088 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2089 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2090 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
2091 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
2092 break;
2093 }
2094
2095 /*
2096 * Set the LONG MODE bit in the extended feature mask.
2097 * Assumes the caller knows what it's doing! (host must support these)
2098 */
2099 case CPUMCPUIDFEATURE_LONG_MODE:
2100 {
2101 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2102 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2103 {
2104 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
2105 return;
2106 }
2107
2108 /* Valid for both Intel and AMD. */
2109 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2110 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
2111 break;
2112 }
2113
2114 /*
2115 * Set the NX/XD bit in the extended feature mask.
2116 * Assumes the caller knows what it's doing! (host must support these)
2117 */
2118 case CPUMCPUIDFEATURE_NX:
2119 {
2120 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2121 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
2122 {
2123 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
2124 return;
2125 }
2126
2127 /* Valid for both Intel and AMD. */
2128 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
2129 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
2130 break;
2131 }
2132
2133 /*
2134 * Set the LAHF/SAHF support in 64-bit mode.
2135 * Assumes the caller knows what it's doing! (host must support this)
2136 */
2137 case CPUMCPUIDFEATURE_LAHF:
2138 {
2139 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2140 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
2141 {
2142 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
2143 return;
2144 }
2145
2146 /* Valid for both Intel and AMD. */
2147 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2148 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
2149 break;
2150 }
2151
2152 case CPUMCPUIDFEATURE_PAT:
2153 {
2154 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2155 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
2156 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2157 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2158 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
2159 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
2160 break;
2161 }
2162
2163 /*
2164 * Set the RDTSCP support bit.
2165 * Assumes the caller knows what it's doing! (host must support this)
2166 */
2167 case CPUMCPUIDFEATURE_RDTSCP:
2168 {
2169 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2170 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
2171 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
2172 {
2173 if (!pVM->cpum.s.u8PortableCpuIdLevel)
2174 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
2175 return;
2176 }
2177
2178 /* Valid for both Intel and AMD. */
2179 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2180 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
2181 break;
2182 }
2183
2184 /*
2185 * Set the Hypervisor Present bit in the standard feature mask.
2186 */
2187 case CPUMCPUIDFEATURE_HVP:
2188 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2189 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
2190 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
2191 break;
2192
2193 default:
2194 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2195 break;
2196 }
2197 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2198 {
2199 PVMCPU pVCpu = &pVM->aCpus[i];
2200 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2201 }
2202}
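
/*
 * Illustrative usage sketch, not part of the build: how ring-3 initialization
 * code might expose optional features with the setter above.  The helper name
 * and the calling context are assumptions made for this example.
 */
#if 0
static void exampleExposeOptionalFeatures(PVM pVM)
{
    /* PAE and NX are only forwarded when the host supports them; the setter
       refuses (with a release-log warning) otherwise. */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);

    /* Features without a host dependency, like the hypervisor-present bit,
       are set unconditionally. */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
}
#endif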
2203
2204
2205/**
2206 * Queries a CPUID feature bit.
2207 *
2208 * @returns boolean for feature presence
2209 * @param pVM Pointer to the VM.
2210 * @param enmFeature The feature to query.
2211 */
2212VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2213{
2214 switch (enmFeature)
2215 {
2216 case CPUMCPUIDFEATURE_PAE:
2217 {
2218 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2219 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
2220 break;
2221 }
2222
2223 case CPUMCPUIDFEATURE_NX:
2224 {
2225 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2226 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
 2227 break;
 }
2228
2229 case CPUMCPUIDFEATURE_SYSCALL:
2230 {
2231 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2232 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
 2233 break;
 }
2234
2235 case CPUMCPUIDFEATURE_RDTSCP:
2236 {
2237 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2238 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2239 break;
2240 }
2241
2242 case CPUMCPUIDFEATURE_LONG_MODE:
2243 {
2244 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2245 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
2246 break;
2247 }
2248
2249 default:
2250 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2251 break;
2252 }
2253 return false;
2254}
2255
2256
2257/**
2258 * Clears a CPUID feature bit.
2259 *
2260 * @param pVM Pointer to the VM.
2261 * @param enmFeature The feature to clear.
2262 */
2263VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2264{
2265 switch (enmFeature)
2266 {
2267 /*
2268 * Set the APIC bit in both feature masks.
2269 */
2270 case CPUMCPUIDFEATURE_APIC:
2271 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2272 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
2273 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2274 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2275 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2276 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
2277 break;
2278
2279 /*
2280 * Clear the x2APIC bit in the standard feature mask.
2281 */
2282 case CPUMCPUIDFEATURE_X2APIC:
2283 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2284 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2285 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
2286 break;
2287
2288 case CPUMCPUIDFEATURE_PAE:
2289 {
2290 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2291 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
2292 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2293 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2294 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2295 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
2296 break;
2297 }
2298
2299 case CPUMCPUIDFEATURE_PAT:
2300 {
2301 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2302 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
2303 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2304 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2305 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2306 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
2307 break;
2308 }
2309
2310 case CPUMCPUIDFEATURE_LONG_MODE:
2311 {
2312 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2313 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2314 break;
2315 }
2316
2317 case CPUMCPUIDFEATURE_LAHF:
2318 {
2319 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2320 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2321 break;
2322 }
2323
2324 case CPUMCPUIDFEATURE_RDTSCP:
2325 {
2326 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2327 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2328 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2329 break;
2330 }
2331
2332 case CPUMCPUIDFEATURE_HVP:
2333 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2334 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
2335 break;
2336
2337 default:
2338 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2339 break;
2340 }
2341 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2342 {
2343 PVMCPU pVCpu = &pVM->aCpus[i];
2344 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2345 }
2346}
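
/*
 * Illustrative sketch, not part of the build: the set/query/clear trio is
 * typically driven by configuration.  The fEnableNX flag and the helper name
 * are invented for this example.
 */
#if 0
static void exampleApplyNxConfig(PVM pVM, bool fEnableNX)
{
    if (fEnableNX)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX))
        CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
}
#endif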
2347
2348
2349/**
2350 * Gets the host CPU vendor.
2351 *
2352 * @returns CPU vendor.
2353 * @param pVM Pointer to the VM.
2354 */
2355VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2356{
2357 return pVM->cpum.s.enmHostCpuVendor;
2358}
2359
2360
2361/**
 2362 * Gets the guest CPU vendor.
2363 *
2364 * @returns CPU vendor.
2365 * @param pVM Pointer to the VM.
2366 */
2367VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2368{
2369 return pVM->cpum.s.enmGuestCpuVendor;
2370}
2371
2372
2373VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2374{
2375 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2376 return CPUMRecalcHyperDRx(pVCpu, 0, false);
2377}
2378
2379
2380VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2381{
2382 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2383 return CPUMRecalcHyperDRx(pVCpu, 1, false);
2384}
2385
2386
2387VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2388{
2389 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2390 return CPUMRecalcHyperDRx(pVCpu, 2, false);
2391}
2392
2393
2394VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2395{
2396 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2397 return CPUMRecalcHyperDRx(pVCpu, 3, false);
2398}
2399
2400
2401VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2402{
2403 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2404 return VINF_SUCCESS; /* No need to recalc. */
2405}
2406
2407
2408VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2409{
2410 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2411 return CPUMRecalcHyperDRx(pVCpu, 7, false);
2412}
2413
2414
2415VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2416{
2417 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2418 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2419 if (iReg == 4 || iReg == 5)
2420 iReg += 2;
2421 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2422 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
2423}
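
/*
 * Illustrative sketch, not part of the build: since DR4/DR5 alias DR6/DR7 in
 * the generic setter above, a write to "DR5" lands in dr[7] and triggers the
 * same recalculation as CPUMSetGuestDR7.  The helper is invented for this
 * example.
 */
#if 0
static int exampleWriteDr5(PVMCPU pVCpu, uint64_t uValue)
{
    int rc = CPUMSetGuestDRx(pVCpu, 5 /* aliases DR7 */, uValue);
    Assert(RT_FAILURE(rc) || CPUMGetGuestDR7(pVCpu) == uValue);
    return rc;
}
#endif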
2424
2425
2426/**
2427 * Recalculates the hypervisor DRx register values based on current guest
2428 * registers and DBGF breakpoints, updating changed registers depending on the
2429 * context.
2430 *
2431 * This is called whenever a guest DRx register is modified (any context) and
2432 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
2433 *
 2434 * In raw-mode context this function will reload any (hyper) DRx registers that
 2435 * come out with a different value. It may also have to save the host debug
 2436 * registers if that hasn't been done already. In this context though, we'll
2437 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
2438 * are only important when breakpoints are actually enabled.
2439 *
2440 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
 2441 * reloaded by the HM code if it changes. Furthermore, we will only use the
2442 * combined register set when the VBox debugger is actually using hardware BPs,
2443 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
2444 * concern us here).
2445 *
 2446 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
2447 * all the time.
2448 *
2449 * @returns VINF_SUCCESS.
2450 * @param pVCpu Pointer to the VMCPU.
2451 * @param iGstReg The guest debug register number that was modified.
 2452 * UINT8_MAX if not a guest register.
2453 * @param fForceHyper Used in HM to force hyper registers because of single
2454 * stepping.
2455 */
2456VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
2457{
2458 PVM pVM = pVCpu->CTX_SUFF(pVM);
2459
2460 /*
2461 * Compare the DR7s first.
2462 *
2463 * We only care about the enabled flags. GD is virtualized when we
 2464 * dispatch the #DB; we never enable it. The DBGF DR7 value will
2465 * always have the LE and GE bits set, so no need to check and disable
2466 * stuff if they're cleared like we have to for the guest DR7.
2467 */
2468 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2469 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
2470 uGstDr7 = 0;
2471 else if (!(uGstDr7 & X86_DR7_LE))
2472 uGstDr7 &= ~X86_DR7_LE_ALL;
2473 else if (!(uGstDr7 & X86_DR7_GE))
2474 uGstDr7 &= ~X86_DR7_GE_ALL;
2475
2476 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2477
2478#ifdef IN_RING0
2479 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
2480 fForceHyper = true;
2481#endif
2482 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
2483 {
2484 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2485#ifdef IN_RC
2486 bool const fHmEnabled = false;
2487#elif defined(IN_RING3)
2488 bool const fHmEnabled = HMIsEnabled(pVM);
2489#endif
2490
2491 /*
2492 * Ok, something is enabled. Recalc each of the breakpoints, taking
 2493 * the VM debugger ones over the guest ones. In raw-mode context we will
2494 * not allow breakpoints with values inside the hypervisor area.
2495 */
2496 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
2497
2498 /* bp 0 */
2499 RTGCUINTREG uNewDr0;
2500 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2501 {
2502 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2503 uNewDr0 = DBGFBpGetDR0(pVM);
2504 }
2505 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2506 {
2507 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2508#ifndef IN_RING0
2509 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
2510 uNewDr0 = 0;
2511 else
2512#endif
2513 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2514 }
2515 else
2516 uNewDr0 = 0;
2517
2518 /* bp 1 */
2519 RTGCUINTREG uNewDr1;
2520 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2521 {
2522 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2523 uNewDr1 = DBGFBpGetDR1(pVM);
2524 }
2525 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2526 {
2527 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2528#ifndef IN_RING0
2529 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
2530 uNewDr1 = 0;
2531 else
2532#endif
2533 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2534 }
2535 else
2536 uNewDr1 = 0;
2537
2538 /* bp 2 */
2539 RTGCUINTREG uNewDr2;
2540 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2541 {
2542 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2543 uNewDr2 = DBGFBpGetDR2(pVM);
2544 }
2545 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2546 {
2547 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2548#ifndef IN_RING0
2549 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2550 uNewDr2 = 0;
2551 else
2552#endif
2553 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2554 }
2555 else
2556 uNewDr2 = 0;
2557
2558 /* bp 3 */
2559 RTGCUINTREG uNewDr3;
2560 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2561 {
2562 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2563 uNewDr3 = DBGFBpGetDR3(pVM);
2564 }
2565 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2566 {
2567 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2568#ifndef IN_RING0
2569 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2570 uNewDr3 = 0;
2571 else
2572#endif
2573 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2574 }
2575 else
2576 uNewDr3 = 0;
2577
2578 /*
2579 * Apply the updates.
2580 */
2581#ifdef IN_RC
2582 /* Make sure to save host registers first. */
2583 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2584 {
2585 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2586 {
2587 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2588 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2589 }
2590 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2591 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2592 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2593 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2594 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2595
2596 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2597 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2598 ASMSetDR0(uNewDr0);
2599 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2600 ASMSetDR1(uNewDr1);
2601 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2602 ASMSetDR2(uNewDr2);
2603 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2604 ASMSetDR3(uNewDr3);
2605 ASMSetDR6(X86_DR6_INIT_VAL);
2606 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2607 ASMSetDR7(uNewDr7);
2608 }
2609 else
2610#endif
2611 {
2612 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2613 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2614 CPUMSetHyperDR3(pVCpu, uNewDr3);
2615 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2616 CPUMSetHyperDR2(pVCpu, uNewDr2);
2617 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2618 CPUMSetHyperDR1(pVCpu, uNewDr1);
2619 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2620 CPUMSetHyperDR0(pVCpu, uNewDr0);
2621 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2622 CPUMSetHyperDR7(pVCpu, uNewDr7);
2623 }
2624 }
2625#ifdef IN_RING0
2626 else if (CPUMIsGuestDebugStateActive(pVCpu))
2627 {
2628 /*
2629 * Reload the register that was modified. Normally this won't happen
2630 * as we won't intercept DRx writes when not having the hyper debug
2631 * state loaded, but in case we do for some reason we'll simply deal
2632 * with it.
2633 */
2634 switch (iGstReg)
2635 {
2636 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2637 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2638 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2639 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2640 default:
2641 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2642 }
2643 }
2644#endif
2645 else
2646 {
2647 /*
2648 * No active debug state any more. In raw-mode this means we have to
2649 * make sure DR7 has everything disabled now, if we armed it already.
2650 * In ring-0 we might end up here when just single stepping.
2651 */
2652#if defined(IN_RC) || defined(IN_RING0)
2653 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2654 {
2655# ifdef IN_RC
2656 ASMSetDR7(X86_DR7_INIT_VAL);
2657# endif
2658 if (pVCpu->cpum.s.Hyper.dr[0])
2659 ASMSetDR0(0);
2660 if (pVCpu->cpum.s.Hyper.dr[1])
2661 ASMSetDR1(0);
2662 if (pVCpu->cpum.s.Hyper.dr[2])
2663 ASMSetDR2(0);
2664 if (pVCpu->cpum.s.Hyper.dr[3])
2665 ASMSetDR3(0);
2666 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2667 }
2668#endif
2669 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2670
2671 /* Clear all the registers. */
2672 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2673 pVCpu->cpum.s.Hyper.dr[3] = 0;
2674 pVCpu->cpum.s.Hyper.dr[2] = 0;
2675 pVCpu->cpum.s.Hyper.dr[1] = 0;
2676 pVCpu->cpum.s.Hyper.dr[0] = 0;
2677
2678 }
2679 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2680 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2681 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2682 pVCpu->cpum.s.Hyper.dr[7]));
2683
2684 return VINF_SUCCESS;
2685}
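
/*
 * Illustrative sketch, not part of the build: the guest DR7 gating performed
 * at the top of CPUMRecalcHyperDRx, pulled out into a pure helper so the three
 * cases are easier to follow.  The helper name is invented for this example.
 */
#if 0
static RTGCUINTREG exampleGateGuestDr7(RTGCUINTREG uGstDr7)
{
    if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
        return 0;                           /* Neither LE nor GE: ignore the guest breakpoints entirely. */
    if (!(uGstDr7 & X86_DR7_LE))
        return uGstDr7 & ~X86_DR7_LE_ALL;   /* LE clear: drop the local enable bits. */
    if (!(uGstDr7 & X86_DR7_GE))
        return uGstDr7 & ~X86_DR7_GE_ALL;   /* GE clear: drop the global enable bits. */
    return uGstDr7;
}
#endif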
2686
2687
2688/**
2689 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2690 *
 2691 * @returns true if NXE is enabled, otherwise false.
2692 * @param pVCpu Pointer to the VMCPU.
2693 */
2694VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2695{
2696 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2697}
2698
2699
2700/**
2701 * Tests if the guest has the Page Size Extension enabled (PSE).
2702 *
 2703 * @returns true if PSE (or PAE, which implies it) is enabled, otherwise false.
2704 * @param pVCpu Pointer to the VMCPU.
2705 */
2706VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2707{
2708 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2709 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2710}
2711
2712
2713/**
 2714 * Tests if the guest has paging enabled (PG).
 2715 *
 2716 * @returns true if paging is enabled, otherwise false.
2717 * @param pVCpu Pointer to the VMCPU.
2718 */
2719VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2720{
2721 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2722}
2723
2724
2725/**
 2726 * Tests if the guest has write protection for ring-0 accesses enabled (CR0.WP).
 2727 *
 2728 * @returns true if CR0.WP is set, otherwise false.
2729 * @param pVCpu Pointer to the VMCPU.
2730 */
2731VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2732{
2733 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2734}
2735
2736
2737/**
2738 * Tests if the guest is running in real mode or not.
2739 *
2740 * @returns true if in real mode, otherwise false.
2741 * @param pVCpu Pointer to the VMCPU.
2742 */
2743VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2744{
2745 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2746}
2747
2748
2749/**
2750 * Tests if the guest is running in real or virtual 8086 mode.
2751 *
2752 * @returns @c true if it is, @c false if not.
2753 * @param pVCpu Pointer to the VMCPU.
2754 */
2755VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2756{
2757 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2758 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2759}
2760
2761
2762/**
 2763 * Tests if the guest is running in protected mode or not.
2764 *
2765 * @returns true if in protected mode, otherwise false.
2766 * @param pVCpu Pointer to the VMCPU.
2767 */
2768VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2769{
2770 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2771}
2772
2773
2774/**
2775 * Tests if the guest is running in paged protected or not.
2776 *
2777 * @returns true if in paged protected mode, otherwise false.
2778 * @param pVCpu Pointer to the VMCPU.
2779 */
2780VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2781{
2782 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2783}
2784
2785
2786/**
2787 * Tests if the guest is running in long mode or not.
2788 *
2789 * @returns true if in long mode, otherwise false.
2790 * @param pVCpu Pointer to the VMCPU.
2791 */
2792VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2793{
2794 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2795}
2796
2797
2798/**
2799 * Tests if the guest is running in PAE mode or not.
2800 *
2801 * @returns true if in PAE mode, otherwise false.
2802 * @param pVCpu Pointer to the VMCPU.
2803 */
2804VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2805{
2806 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2807 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2808 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
2809}
2810
2811
2812/**
 2813 * Tests if the guest is running in 64-bit mode or not.
 2814 *
 2815 * @returns true if in 64-bit protected mode, otherwise false.
2816 * @param pVCpu The current virtual CPU.
2817 */
2818VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2819{
2820 if (!CPUMIsGuestInLongMode(pVCpu))
2821 return false;
2822 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2823 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2824}
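
/*
 * Illustrative sketch, not part of the build: combining the mode predicates
 * above to classify the guest execution environment, e.g. for logging.  The
 * helper and the string labels are invented for this example.
 */
#if 0
static const char *exampleGuestModeName(PVMCPU pVCpu)
{
    if (CPUMIsGuestInRealMode(pVCpu))
        return "real";
    if (CPUMIsGuestIn64BitCode(pVCpu))
        return "64-bit";
    if (CPUMIsGuestInLongMode(pVCpu))
        return "long (compatibility)";
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE";
    if (CPUMIsGuestInPagedProtectedMode(pVCpu))
        return "paged protected";
    return "protected";
}
#endif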
2825
2826
2827/**
2828 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2829 * registers.
2830 *
 2831 * @returns true if in 64-bit protected mode, otherwise false.
2832 * @param pCtx Pointer to the current guest CPU context.
2833 */
2834VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2835{
2836 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2837}
2838
2839#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2840
2841/**
 2842 * Checks whether the guest state has been transformed for raw-mode execution.
2843 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2844 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2845 * @param pVCpu The current virtual CPU.
2846 */
2847VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2848{
2849 return pVCpu->cpum.s.fRawEntered;
2850}
2851
2852/**
2853 * Transforms the guest CPU state to raw-ring mode.
2854 *
 2855 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
2856 *
2857 * @returns VBox status. (recompiler failure)
2858 * @param pVCpu Pointer to the VMCPU.
2859 * @param pCtxCore The context core (for trap usage).
2860 * @see @ref pg_raw
2861 */
2862VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2863{
2864 PVM pVM = pVCpu->CTX_SUFF(pVM);
2865
2866 Assert(!pVCpu->cpum.s.fRawEntered);
2867 Assert(!pVCpu->cpum.s.fRemEntered);
2868 if (!pCtxCore)
2869 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2870
2871 /*
2872 * Are we in Ring-0?
2873 */
2874 if ( pCtxCore->ss.Sel
2875 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2876 && !pCtxCore->eflags.Bits.u1VM)
2877 {
2878 /*
2879 * Enter execution mode.
2880 */
2881 PATMRawEnter(pVM, pCtxCore);
2882
2883 /*
2884 * Set CPL to Ring-1.
2885 */
2886 pCtxCore->ss.Sel |= 1;
2887 if ( pCtxCore->cs.Sel
2888 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
2889 pCtxCore->cs.Sel |= 1;
2890 }
2891 else
2892 {
2893# ifdef VBOX_WITH_RAW_RING1
2894 if ( EMIsRawRing1Enabled(pVM)
2895 && !pCtxCore->eflags.Bits.u1VM
2896 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
2897 {
2898 /* Set CPL to Ring-2. */
2899 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
2900 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2901 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
2902 }
2903# else
2904 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
2905 ("ring-1 code not supported\n"));
2906# endif
2907 /*
2908 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2909 */
2910 PATMRawEnter(pVM, pCtxCore);
2911 }
2912
2913 /*
2914 * Assert sanity.
2915 */
2916 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2917 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
2918 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2919 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2920
2921 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
2922
2923 pVCpu->cpum.s.fRawEntered = true;
2924 return VINF_SUCCESS;
2925}
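
/*
 * Illustrative sketch, not part of the build: what the ring compression above
 * does to a flat ring-0 selector value.  The 0x08 selector is just an example
 * value, not one of VirtualBox's actual GDT entries.
 */
#if 0
static void exampleRingCompression(void)
{
    RTSEL SelCs = 0x08;                 /* GDT index 1, TI=0, RPL=0. */
    SelCs |= 1;                         /* CPUMRawEnter: ring-0 guest code is pushed to RPL=1. */
    Assert((SelCs & X86_SEL_RPL) == 1);
    SelCs &= ~X86_SEL_RPL;              /* CPUMRawLeave: restored to RPL=0 afterwards. */
    Assert(SelCs == 0x08);
}
#endif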
2926
2927
2928/**
2929 * Transforms the guest CPU state from raw-ring mode to correct values.
2930 *
2931 * This function will change any selector registers with DPL=1 to DPL=0.
2932 *
2933 * @returns Adjusted rc.
2934 * @param pVCpu Pointer to the VMCPU.
2935 * @param rc Raw mode return code
2936 * @param pCtxCore The context core (for trap usage).
2937 * @see @ref pg_raw
2938 */
2939VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
2940{
2941 PVM pVM = pVCpu->CTX_SUFF(pVM);
2942
2943 /*
2944 * Don't leave if we've already left (in RC).
2945 */
2946 Assert(!pVCpu->cpum.s.fRemEntered);
2947 if (!pVCpu->cpum.s.fRawEntered)
2948 return rc;
2949 pVCpu->cpum.s.fRawEntered = false;
2950
2951 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2952 if (!pCtxCore)
2953 pCtxCore = CPUMCTX2CORE(pCtx);
2954 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
2955 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
2956 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2957
2958 /*
2959 * Are we executing in raw ring-1?
2960 */
2961 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
2962 && !pCtxCore->eflags.Bits.u1VM)
2963 {
2964 /*
2965 * Leave execution mode.
2966 */
2967 PATMRawLeave(pVM, pCtxCore, rc);
2968 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2969 /** @todo See what happens if we remove this. */
2970 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2971 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2972 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2973 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2974 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2975 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2976 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2977 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2978
2979 /*
2980 * Ring-1 selector => Ring-0.
2981 */
2982 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
2983 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2984 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
2985 }
2986 else
2987 {
2988 /*
2989 * PATM is taking care of the IOPL and IF flags for us.
2990 */
2991 PATMRawLeave(pVM, pCtxCore, rc);
2992 if (!pCtxCore->eflags.Bits.u1VM)
2993 {
2994# ifdef VBOX_WITH_RAW_RING1
2995 if ( EMIsRawRing1Enabled(pVM)
2996 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
2997 {
2998 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2999 /** @todo See what happens if we remove this. */
3000 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
3001 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
3002 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
3003 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
3004 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
3005 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
3006 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
3007 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
3008
3009 /*
3010 * Ring-2 selector => Ring-1.
3011 */
3012 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
3013 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
3014 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
3015 }
3016 else
3017 {
3018# endif
3019 /** @todo See what happens if we remove this. */
3020 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
3021 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
3022 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
3023 pCtxCore->es.Sel &= ~X86_SEL_RPL;
3024 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
3025 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
3026 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
3027 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
3028# ifdef VBOX_WITH_RAW_RING1
3029 }
3030# endif
3031 }
3032 }
3033
3034 return rc;
3035}
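
/*
 * Illustrative usage sketch, not part of the build: the enter/leave pair as a
 * raw-mode execution loop is assumed to use it, with the leave call adjusting
 * the status code that came back from guest execution.  exampleExecuteGuestCode
 * is a hypothetical placeholder.
 */
#if 0
static int exampleRunRaw(PVM pVM, PVMCPU pVCpu)
{
    int rc = CPUMRawEnter(pVCpu, NULL /* use the default guest context core */);
    AssertRCReturn(rc, rc);

    rc = exampleExecuteGuestCode(pVM, pVCpu);   /* hypothetical placeholder */

    return CPUMRawLeave(pVCpu, NULL, rc);
}
#endif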
3036
3037#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3038
3039/**
3040 * Updates the EFLAGS while we're in raw-mode.
3041 *
3042 * @param pVCpu Pointer to the VMCPU.
3043 * @param fEfl The new EFLAGS value.
3044 */
3045VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
3046{
3047#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3048 if (pVCpu->cpum.s.fRawEntered)
3049 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
3050 else
3051#endif
3052 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
3053}
3054
3055
3056/**
3057 * Gets the EFLAGS while we're in raw-mode.
3058 *
3059 * @returns The eflags.
3060 * @param pVCpu Pointer to the current virtual CPU.
3061 */
3062VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
3063{
3064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3065 if (pVCpu->cpum.s.fRawEntered)
3066 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
3067#endif
3068 return pVCpu->cpum.s.Guest.eflags.u32;
3069}
3070
3071
3072/**
3073 * Sets the specified changed flags (CPUM_CHANGED_*).
3074 *
3075 * @param pVCpu Pointer to the current virtual CPU.
3076 */
3077VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
3078{
3079 pVCpu->cpum.s.fChanged |= fChangedFlags;
3080}
3081
3082
3083/**
 3084 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
3085 * @returns true if supported.
3086 * @returns false if not supported.
3087 * @param pVM Pointer to the VM.
3088 */
3089VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
3090{
3091 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
3092}
3093
3094
3095/**
3096 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
3097 * @returns true if used.
3098 * @returns false if not used.
3099 * @param pVM Pointer to the VM.
3100 */
3101VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
3102{
3103 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
3104}
3105
3106
3107/**
3108 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
3109 * @returns true if used.
3110 * @returns false if not used.
3111 * @param pVM Pointer to the VM.
3112 */
3113VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
3114{
3115 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
3116}
3117
3118#ifdef IN_RC
3119
3120/**
3121 * Lazily sync in the FPU/XMM state.
3122 *
3123 * @returns VBox status code.
3124 * @param pVCpu Pointer to the VMCPU.
3125 */
3126VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
3127{
3128 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
3129}
3130
3131#endif /* IN_RC */
3132
3133/**
3134 * Checks if we activated the FPU/XMM state of the guest OS.
3135 * @returns true if we did.
3136 * @returns false if not.
3137 * @param pVCpu Pointer to the VMCPU.
3138 */
3139VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
3140{
3141 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
3142}
3143
3144
3145/**
3146 * Deactivate the FPU/XMM state of the guest OS.
3147 * @param pVCpu Pointer to the VMCPU.
3148 *
3149 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
3150 * FPU state management.
3151 */
3152VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
3153{
3154 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
3155 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
3156}
3157
3158
3159/**
3160 * Checks if the guest debug state is active.
3161 *
3162 * @returns boolean
 3163 * @param pVCpu Pointer to the VMCPU.
3164 */
3165VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
3166{
3167 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
3168}
3169
3170
3171/**
3172 * Checks if the guest debug state is to be made active during the world-switch
3173 * (currently only used for the 32->64 switcher case).
3174 *
3175 * @returns boolean
 3176 * @param pVCpu Pointer to the VMCPU.
3177 */
3178VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
3179{
3180 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
3181}
3182
3183
3184/**
3185 * Checks if the hyper debug state is active.
3186 *
3187 * @returns boolean
 3188 * @param pVCpu Pointer to the VMCPU.
3189 */
3190VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
3191{
3192 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
3193}
3194
3195
3196/**
3197 * Checks if the hyper debug state is to be made active during the world-switch
3198 * (currently only used for the 32->64 switcher case).
3199 *
3200 * @returns boolean
 3201 * @param pVCpu Pointer to the VMCPU.
3202 */
3203VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
3204{
3205 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
3206}
3207
3208
3209/**
3210 * Mark the guest's debug state as inactive.
3211 *
 3212 * @param pVCpu Pointer to the VMCPU.
3214 * @todo This API doesn't make sense any more.
3215 */
3216VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
3217{
3218 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
3219}
3220
3221
3222/**
3223 * Get the current privilege level of the guest.
3224 *
3225 * @returns CPL
3226 * @param pVCpu Pointer to the current virtual CPU.
3227 */
3228VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
3229{
3230 /*
3231 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
3232 *
3233 * Note! We used to check CS.DPL here, assuming it was always equal to
 3234 * CPL even if a conforming segment was loaded. But this turned out to
3235 * only apply to older AMD-V. With VT-x we had an ACP2 regression
3236 * during install after a far call to ring 2 with VT-x. Then on newer
3237 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
3238 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
3239 *
3240 * So, forget CS.DPL, always use SS.DPL.
3241 *
3242 * Note! The SS RPL is always equal to the CPL, while the CS RPL
3243 * isn't necessarily equal if the segment is conforming.
3244 * See section 4.11.1 in the AMD manual.
3245 *
3246 * Update: Where the heck does it say CS.RPL can differ from CPL other than
3247 * right after real->prot mode switch and when in V8086 mode? That
 3248 * section says the RPL specified in a direct transfer (call, jmp,
 3249 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
 3250 * it would be impossible for an exception handler or the iret
 3251 * instruction to figure out whether SS:ESP are part of the frame
 3252 * or not. VBox or qemu bug must've led to this misconception.
3253 *
3254 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
3255 * selector into SS with an RPL other than the CPL when CPL != 3 and
3256 * we're in 64-bit mode. The intel dev box doesn't allow this, on
3257 * RPL = CPL. Weird.
3258 */
3259 uint32_t uCpl;
3260 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
3261 {
3262 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3263 {
3264 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
3265 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
3266 else
3267 {
3268 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
3269#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3270# ifdef VBOX_WITH_RAW_RING1
3271 if (pVCpu->cpum.s.fRawEntered)
3272 {
3273 if ( uCpl == 2
3274 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
3275 uCpl = 1;
3276 else if (uCpl == 1)
3277 uCpl = 0;
3278 }
3279 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
3280# else
3281 if (uCpl == 1)
3282 uCpl = 0;
3283# endif
3284#endif
3285 }
3286 }
3287 else
3288 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
3289 }
3290 else
3291 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
3292 return uCpl;
3293}
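
/*
 * Illustrative sketch, not part of the build: a typical consumer pattern, e.g.
 * an instruction handler refusing a privileged operation unless the guest is
 * in supervisor mode (CPL 0).  The helper is invented for this example.
 */
#if 0
static int exampleCheckPrivilege(PVMCPU pVCpu)
{
    if (CPUMGetGuestCPL(pVCpu) != 0)
        return VERR_ACCESS_DENIED;      /* Would typically end up as a #GP(0) for the guest. */
    return VINF_SUCCESS;
}
#endif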
3294
3295
3296/**
3297 * Gets the current guest CPU mode.
3298 *
3299 * If paging mode is what you need, check out PGMGetGuestMode().
3300 *
3301 * @returns The CPU mode.
3302 * @param pVCpu Pointer to the VMCPU.
3303 */
3304VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
3305{
3306 CPUMMODE enmMode;
3307 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3308 enmMode = CPUMMODE_REAL;
3309 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3310 enmMode = CPUMMODE_PROTECTED;
3311 else
3312 enmMode = CPUMMODE_LONG;
3313
3314 return enmMode;
3315}
3316
3317
3318/**
3319 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
3320 *
3321 * @returns 16, 32 or 64.
3322 * @param pVCpu The current virtual CPU.
3323 */
3324VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
3325{
3326 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3327 return 16;
3328
3329 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3330 {
3331 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3332 return 16;
3333 }
3334
3335 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3336 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3337 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3338 return 64;
3339
3340 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3341 return 32;
3342
3343 return 16;
3344}
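
/*
 * Illustrative sketch, not part of the build: CPUMGetGuestCodeBits and
 * CPUMGetGuestDisMode (below) encode the same decision; this invented helper
 * maps the bit width onto the disassembler enum for callers that already have
 * the former.
 */
#if 0
static DISCPUMODE exampleCodeBitsToDisMode(uint32_t cBits)
{
    switch (cBits)
    {
        case 64: return DISCPUMODE_64BIT;
        case 32: return DISCPUMODE_32BIT;
        default: AssertFailed(); /* fall thru */
        case 16: return DISCPUMODE_16BIT;
    }
}
#endif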
3345
3346
3347VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
3348{
3349 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3350 return DISCPUMODE_16BIT;
3351
3352 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3353 {
3354 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3355 return DISCPUMODE_16BIT;
3356 }
3357
3358 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3359 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3360 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3361 return DISCPUMODE_64BIT;
3362
3363 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3364 return DISCPUMODE_32BIT;
3365
3366 return DISCPUMODE_16BIT;
3367}
3368