VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 49482

Last change on this file since 49482 was 49479, checked in by vboxsync, 11 years ago

VMM: Warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 102.5 KB
1/* $Id: CPUMAllRegs.cpp 49479 2013-11-14 15:13:05Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
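As a minimal sketch of the container-of pattern that RT_FROM_MEMBER implements (the struct here is illustrative, not a VirtualBox type): subtract the member's offset from the member pointer to recover the enclosing structure.

#include <stddef.h>

struct Outer { int a; int b; };

/* Recover the enclosing struct from a pointer to its 'b' member by
   subtracting the member offset, which is the essence of RT_FROM_MEMBER. */
static struct Outer *OuterFromB(int *pB)
{
    return (struct Outer *)((char *)pB - offsetof(struct Outer, b));
}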
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
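For reference, a worked instance of the Sel << 4 base computation used by both the V8086 and real-mode paths above:

/* A selector value of 0xB800 yields a linear base of 0xB800 << 4 = 0x000B8000,
   the classic VGA text-mode buffer. */
uint64_t const uExampleBase = (uint32_t)0xB800 << 4; /* == 0x000B8000 */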
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 * @param pVCpu The current virtual CPU.
152 * @param pSReg The selector register to load the hidden parts of.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as parameters will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
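A hedged caller sketch for CPUMSetHyperState; the entry point and stack top are placeholder parameters, not values lifted from VMMR3RawRunGC.

static void ExampleResetHyperState(PVMCPU pVCpu, uint32_t uEntryEip, uint32_t uStackTop)
{
    /* EAX and EDX are the only scratch parameters; the API zeroes the other
       general registers and sets EFLAGS so interrupts are off and IOPL is 0. */
    CPUMSetHyperState(pVCpu, uEntryEip, uStackTop, 0 /*u32EAX*/, 0 /*u32EDX*/);
}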
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
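An illustrative ordering for arming a hypervisor breakpoint with these setters. Programming DR7 last, so the enable bits only take effect once the address register is in place, is an assumption here, not a documented contract.

static void ExampleArmHyperBreakpoint(PVMCPU pVCpu, RTGCUINTREG uAddress, RTGCUINTREG uDr7)
{
    CPUMSetHyperDR0(pVCpu, uAddress);  /* breakpoint address first */
    CPUMSetHyperDR6(pVCpu, 0);         /* clear stale status bits (assumed init value) */
    CPUMSetHyperDR7(pVCpu, uDr7);      /* enable/control bits last */
}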
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
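A hedged sketch of the "caller sets more hidden bits" note above; the base and limit are placeholders for whatever the caller read out of the descriptor table.

static void ExampleLoadLdtrWithHiddenBits(PVMCPU pVCpu, uint16_t uSel, uint64_t uBase, uint32_t cbLimit)
{
    CPUMSetGuestLDTR(pVCpu, uSel);                  /* marks the hidden parts stale */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    pCtx->ldtr.u64Base  = uBase;                    /* placeholder descriptor data */
    pCtx->ldtr.u32Limit = cbLimit;
    pCtx->ldtr.ValidSel = uSel;
    pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
}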
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MT are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP bit goes from 0 to 1 (NetWare WP0+RO+US hack).
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
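A hedged caller sketch for the AM/WP/PG/PE caveat in the doc comment above; the follow-up action is a placeholder for whatever the caller (e.g. PGM notification) actually needs.

static void ExampleSetGuestCr0(PVMCPU pVCpu, uint64_t uNewCr0)
{
    uint64_t const fChangedBits = CPUMGetGuestCR0(pVCpu) ^ uNewCr0;
    CPUMSetGuestCR0(pVCpu, uNewCr0);
    if (fChangedBits & (X86_CR0_AM | X86_CR0_WP | X86_CR0_PG | X86_CR0_PE))
    {
        /* Caller-specific follow-up (e.g. telling PGM about the mode change). */
    }
}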
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873/**
874 * Worker for CPUMQueryGuestMsr().
875 *
876 * @retval VINF_SUCCESS
877 * @retval VERR_CPUM_RAISE_GP_0
878 * @param pVCpu The cross context CPU structure.
879 * @param idMsr The MSR to read.
880 * @param puValue Where to store the return value.
881 */
882static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
883{
884 /*
885 * If we don't indicate MSR support in the CPUID feature bits, indicate
886 * that a #GP(0) should be raised.
887 */
888 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
889 {
890 *puValue = 0;
891 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
892 }
893
894 int rc = VINF_SUCCESS;
895 uint8_t const u8Multiplier = 4;
896 switch (idMsr)
897 {
898 case MSR_IA32_TSC:
899 *puValue = TMCpuTickGet(pVCpu);
900 break;
901
902 case MSR_IA32_APICBASE:
903 {
904 PVM pVM = pVCpu->CTX_SUFF(pVM);
905 if ( ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* APIC Std feature */
906 && (pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_APIC))
907 || ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 /* APIC Ext feature (AMD) */
908 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD
909 && (pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_APIC))
910 || ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* x2APIC */
911 && (pVM->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_X2APIC)))
912 {
913 *puValue = pVCpu->cpum.s.Guest.msrApicBase;
914 }
915 else
916 {
917 *puValue = 0;
918 rc = VERR_CPUM_RAISE_GP_0;
919 }
920 break;
921 }
922
923 case MSR_IA32_CR_PAT:
924 *puValue = pVCpu->cpum.s.Guest.msrPAT;
925 break;
926
927 case MSR_IA32_SYSENTER_CS:
928 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
929 break;
930
931 case MSR_IA32_SYSENTER_EIP:
932 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
933 break;
934
935 case MSR_IA32_SYSENTER_ESP:
936 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
937 break;
938
939 case MSR_IA32_MTRR_CAP:
940 {
941 /* This is currently a bit weird. :-) */
942 uint8_t const cVariableRangeRegs = 0;
943 bool const fSystemManagementRangeRegisters = false;
944 bool const fFixedRangeRegisters = false;
945 bool const fWriteCombiningType = false;
946 *puValue = cVariableRangeRegs
947 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
948 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
949 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
950 break;
951 }
952
953 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
954 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
955 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
956 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
957 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
958 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
959 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
960 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
961 /** @todo implement variable MTRRs. */
962 *puValue = 0;
963 break;
964#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
965 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
966 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
967 *puValue = 0;
968 break;
969#endif
970
971 case MSR_IA32_MTRR_DEF_TYPE:
972 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
973 break;
974
975 case IA32_MTRR_FIX64K_00000:
976 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
977 break;
978 case IA32_MTRR_FIX16K_80000:
979 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
980 break;
981 case IA32_MTRR_FIX16K_A0000:
982 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
983 break;
984 case IA32_MTRR_FIX4K_C0000:
985 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
986 break;
987 case IA32_MTRR_FIX4K_C8000:
988 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
989 break;
990 case IA32_MTRR_FIX4K_D0000:
991 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
992 break;
993 case IA32_MTRR_FIX4K_D8000:
994 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
995 break;
996 case IA32_MTRR_FIX4K_E0000:
997 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
998 break;
999 case IA32_MTRR_FIX4K_E8000:
1000 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
1001 break;
1002 case IA32_MTRR_FIX4K_F0000:
1003 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
1004 break;
1005 case IA32_MTRR_FIX4K_F8000:
1006 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
1007 break;
1008
1009 case MSR_K6_EFER:
1010 *puValue = pVCpu->cpum.s.Guest.msrEFER;
1011 break;
1012
1013 case MSR_K8_SF_MASK:
1014 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
1015 break;
1016
1017 case MSR_K6_STAR:
1018 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
1019 break;
1020
1021 case MSR_K8_LSTAR:
1022 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
1023 break;
1024
1025 case MSR_K8_CSTAR:
1026 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
1027 break;
1028
1029 case MSR_K8_FS_BASE:
1030 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
1031 break;
1032
1033 case MSR_K8_GS_BASE:
1034 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
1035 break;
1036
1037 case MSR_K8_KERNEL_GS_BASE:
1038 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
1039 break;
1040
1041 case MSR_K8_TSC_AUX:
1042 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
1043 break;
1044
1045 case MSR_IA32_PERF_STATUS:
1046 /** @todo This may not be exactly correct; maybe use the host's values.
1047 * Apple code indicates that we should use CPU Hz / 1.333MHz here. */
1048 /** @todo Where are the specs that this implements to be found? */
1049 *puValue = UINT64_C(1000) /* TSC increment by tick */
1050 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
1051 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
1052 break;
1053
1054 case MSR_IA32_FSB_CLOCK_STS:
1055 /*
1056 * Encoded FSB frequency (MHz):
1057 * 0 - 266
1058 * 1 - 133
1059 * 2 - 200
1060 * 3 - 166
1061 * 5 - 100
1062 */
1063 *puValue = (2 << 4);
1064 break;
1065
1066 case MSR_IA32_PLATFORM_INFO:
1067 *puValue = ((uint32_t)u8Multiplier << 8) /* Flex ratio max */
1068 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
1069 break;
1070
1071 case MSR_IA32_THERM_STATUS:
1072 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
1073 *puValue = RT_BIT(31) /* validity bit */
1074 | (UINT64_C(20) << 16) /* degrees till TCC */;
1075 break;
1076
1077 case MSR_IA32_MISC_ENABLE:
1078#if 0
1079 /* Needs to be tested more before enabling. */
1080 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
1081#else
1082 /* Currently we don't allow guests to modify these enable MSRs. */
1083 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
1084
1085 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
1086 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
1087
1088 /** @todo Add more CPUID-controlled features this way. */
1089#endif
1090 break;
1091
1092 /** @todo virtualize DEBUGCTL and relatives */
1093 case MSR_IA32_DEBUGCTL:
1094 *puValue = 0;
1095 break;
1096
1097#if 0 /*def IN_RING0 */
1098 case MSR_IA32_PLATFORM_ID:
1099 case MSR_IA32_BIOS_SIGN_ID:
1100 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
1101 {
1102 /* Available since the P6 family. VT-x implies that this feature is present. */
1103 if (idMsr == MSR_IA32_PLATFORM_ID)
1104 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
1105 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
1106 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
1107 break;
1108 }
1109 /* no break */
1110#endif
1111 /*
1112 * The BIOS_SIGN_ID MSR and MSR_IA32_MCP_CAP et al exist on AMD64 as
1113 * well; at least Bulldozer has them. Windows 7 queries them.
1114 * XP has been observed querying MSR_IA32_MC0_CTL.
1115 * XP64 has been observed querying MSR_P4_LASTBRANCH_0 (also on AMD).
1116 */
1117 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1118 case MSR_IA32_MCG_CAP: /* fam/mod >= 6_01 */
1119 case MSR_IA32_MCG_STATUS: /* indicated as not present in CAP */
1120 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1121 case MSR_IA32_MC0_CTL:
1122 case MSR_IA32_MC0_STATUS:
1123 case MSR_P4_LASTBRANCH_0:
1124 case MSR_P4_LASTBRANCH_1:
1125 case MSR_P4_LASTBRANCH_2:
1126 case MSR_P4_LASTBRANCH_3:
1127 *puValue = 0;
1128 break;
1129
1130
1131 /*
1132 * Intel-specific MSRs:
1133 */
1134 case MSR_P5_MC_ADDR:
1135 case MSR_P5_MC_TYPE:
1136 case MSR_P4_LASTBRANCH_TOS: /** @todo Are these branch regs still here on more recent CPUs? The documentation doesn't mention them for several archs. */
1137 case MSR_IA32_PERFEVTSEL0: /* NetWare 6.5 wants these four. (Bet on AMD as well.) */
1138 case MSR_IA32_PERFEVTSEL1:
1139 case MSR_IA32_PMC0:
1140 case MSR_IA32_PMC1:
1141 case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */
1142 case MSR_IA32_MPERF: /* intel_pstate depends on this but does a validation test */
1143 case MSR_IA32_APERF: /* intel_pstate depends on this but does a validation test */
1144 /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
1145 case MSR_RAPL_POWER_UNIT:
1146 case MSR_BBL_CR_CTL3: /* ca. core arch? */
1147 case MSR_PKG_CST_CONFIG_CONTROL: /* Nehalem, Sandy Bridge */
1148 case MSR_CORE_THREAD_COUNT: /* Apple queries this. */
1149 case MSR_FLEX_RATIO: /* Apple queries this. */
1150 *puValue = 0;
1151 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1152 {
1153 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1154 rc = VERR_CPUM_RAISE_GP_0;
1155 break;
1156 }
1157
1158 /* Provide more plausible values for some of them. */
1159 switch (idMsr)
1160 {
1161 case MSR_RAPL_POWER_UNIT:
1162 *puValue = RT_MAKE_U32_FROM_U8(3 /* power units (1/8 W)*/,
1163 16 /* 15.3 micro-Joules */,
1164 10 /* 976 microseconds increments */,
1165 0);
1166 break;
1167 case MSR_BBL_CR_CTL3:
1168 *puValue = RT_MAKE_U32_FROM_U8(1, /* bit 0 - L2 Hardware Enabled. (RO) */
1169 1, /* bit 8 - L2 Enabled (R/W). */
1170 0, /* bit 23 - L2 Not Present (RO). */
1171 0);
1172 break;
1173 case MSR_PKG_CST_CONFIG_CONTROL:
1174 *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl;
1175 break;
1176 case MSR_CORE_THREAD_COUNT:
1177 {
1178 /** @todo restrict this to nehalem. */
1179 PVM pVM = pVCpu->CTX_SUFF(pVM); /* Note! Not sweating the 4-bit core count limit on westmere. */
1180 *puValue = (pVM->cCpus & 0xffff) | ((pVM->cCpus & 0xffff) << 16);
1181 break;
1182 }
1183
1184 case MSR_FLEX_RATIO:
1185 {
1186 /** @todo Check for P4, it's different there. Try find accurate specs. */
1187 *puValue = (uint32_t)u8Multiplier << 8;
1188 break;
1189 }
1190 }
1191 break;
1192
1193#if 0 /* Only on pentium CPUs! */
1194 /* Event counters, not supported. */
1195 case MSR_IA32_CESR:
1196 case MSR_IA32_CTR0:
1197 case MSR_IA32_CTR1:
1198 *puValue = 0;
1199 break;
1200#endif
1201
1202
1203 /*
1204 * AMD specific MSRs:
1205 */
1206 case MSR_K8_SYSCFG:
1207 case MSR_K8_INT_PENDING:
1208 case MSR_K8_NB_CFG: /* (All known values are 0 on reset.) */
1209 case MSR_K8_HWCR: /* Very interesting bits here. :) */
1210 case MSR_K8_VM_CR: /* Windows 8 */
1211 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1212 case 0xc0010042: /* quick fix for something. */
1213 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1214 case 0xc0011004: /* quick fix for the opposition. */
1215 case 0xc0011005: /* quick fix for the opposition. */
1216 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1217 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1218 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1219 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1220 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1221 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1222 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1223 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1224 *puValue = 0;
1225 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1226 {
1227 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1228 return VERR_CPUM_RAISE_GP_0;
1229 }
1230 /* ignored */
1231 break;
1232
1233 default:
1234 /*
1235 * Hand the X2APIC range to PDM and the APIC.
1236 */
1237 if ( idMsr >= MSR_IA32_X2APIC_START
1238 && idMsr <= MSR_IA32_X2APIC_END)
1239 {
1240 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
1241 if (RT_SUCCESS(rc))
1242 rc = VINF_SUCCESS;
1243 else
1244 {
1245 *puValue = 0;
1246 rc = VERR_CPUM_RAISE_GP_0;
1247 }
1248 }
1249 else
1250 {
1251 *puValue = 0;
1252 rc = VERR_CPUM_RAISE_GP_0;
1253 }
1254 break;
1255 }
1256
1257 return rc;
1258}
1259
1260
1261/**
1262 * Query an MSR.
1263 *
1264 * The caller is responsible for checking privilege if the call is the result
1265 * of a RDMSR instruction. We'll do the rest.
1266 *
1267 * @retval VINF_SUCCESS on success.
1268 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
1269 * expected to take the appropriate actions. @a *puValue is set to 0.
1270 * @param pVCpu Pointer to the VMCPU.
1271 * @param idMsr The MSR.
1272 * @param puValue Where to return the value.
1273 *
1274 * @remarks This will always return the right values, even when we're in the
1275 * recompiler.
1276 */
1277VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
1278{
1279 int rc = cpumQueryGuestMsrInt(pVCpu, idMsr, puValue);
1280 LogFlow(("CPUMQueryGuestMsr: %#x -> %llx rc=%d\n", idMsr, *puValue, rc));
1281 return rc;
1282}
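A hedged sketch of a RDMSR-style caller; per the doc comment the privilege check is the caller's job, and raiseGp0() is a hypothetical helper standing in for real #GP(0) injection.

static int ExampleEmulateRdMsr(PVMCPU pVCpu, uint32_t idMsr, uint32_t *pEax, uint32_t *pEdx)
{
    uint64_t uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return raiseGp0(pVCpu);        /* hypothetical #GP(0) injection helper */
    *pEax = (uint32_t)uValue;          /* RDMSR returns the value in EDX:EAX */
    *pEdx = (uint32_t)(uValue >> 32);
    return rc;
}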
1283
1284
1285/**
1286 * Sets the MSR.
1287 *
1288 * The caller is responsible for checking privilege if the call is the result
1289 * of a WRMSR instruction. We'll do the rest.
1290 *
1291 * @retval VINF_SUCCESS on success.
1292 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
1293 * appropriate actions.
1294 *
1295 * @param pVCpu Pointer to the VMCPU.
1296 * @param idMsr The MSR id.
1297 * @param uValue The value to set.
1298 *
1299 * @remarks Everyone changing MSR values, including the recompiler, shall do it
1300 * by calling this method. This makes sure we have current values and
1301 * that we trigger all the right actions when something changes.
1302 */
1303VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
1304{
1305 LogFlow(("CPUMSetGuestMsr: %#x <- %#llx\n", idMsr, uValue));
1306
1307 /*
1308 * If we don't indicate MSR support in the CPUID feature bits, indicate
1309 * that a #GP(0) should be raised.
1310 */
1311 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
1312 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
1313
1314 int rc = VINF_SUCCESS;
1315 switch (idMsr)
1316 {
1317 case MSR_IA32_MISC_ENABLE:
1318 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1319 break;
1320
1321 case MSR_IA32_TSC:
1322 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1323 break;
1324
1325 case MSR_IA32_APICBASE:
1326 rc = PDMApicSetBase(pVCpu, uValue);
1327 if (rc != VINF_SUCCESS)
1328 rc = VERR_CPUM_RAISE_GP_0;
1329 break;
1330
1331 case MSR_IA32_CR_PAT:
1332 pVCpu->cpum.s.Guest.msrPAT = uValue;
1333 break;
1334
1335 case MSR_IA32_SYSENTER_CS:
1336 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
1337 break;
1338
1339 case MSR_IA32_SYSENTER_EIP:
1340 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1341 break;
1342
1343 case MSR_IA32_SYSENTER_ESP:
1344 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1345 break;
1346
1347 case MSR_IA32_MTRR_CAP:
1348 return VERR_CPUM_RAISE_GP_0;
1349
1350 case MSR_IA32_MTRR_DEF_TYPE:
1351 if ( (uValue & UINT64_C(0xfffffffffffff300))
1352 || ( (uValue & 0xff) != 0
1353 && (uValue & 0xff) != 1
1354 && (uValue & 0xff) != 4
1355 && (uValue & 0xff) != 5
1356 && (uValue & 0xff) != 6) )
1357 {
1358 Log(("CPUM: MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1359 return VERR_CPUM_RAISE_GP_0;
1360 }
1361 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1362 break;
1363
1364 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
1365 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
1366 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
1367 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
1368 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
1369 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
1370 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
1371 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
1372 /** @todo implement variable MTRRs. */
1373 break;
1374#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
1375 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
1376 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
1377 break;
1378#endif
1379
1380 case IA32_MTRR_FIX64K_00000:
1381 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1382 break;
1383 case IA32_MTRR_FIX16K_80000:
1384 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1385 break;
1386 case IA32_MTRR_FIX16K_A0000:
1387 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1388 break;
1389 case IA32_MTRR_FIX4K_C0000:
1390 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1391 break;
1392 case IA32_MTRR_FIX4K_C8000:
1393 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1394 break;
1395 case IA32_MTRR_FIX4K_D0000:
1396 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1397 break;
1398 case IA32_MTRR_FIX4K_D8000:
1399 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1400 break;
1401 case IA32_MTRR_FIX4K_E0000:
1402 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1403 break;
1404 case IA32_MTRR_FIX4K_E8000:
1405 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1406 break;
1407 case IA32_MTRR_FIX4K_F0000:
1408 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1409 break;
1410 case IA32_MTRR_FIX4K_F8000:
1411 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1412 break;
1413
1414 /*
1415 * AMD64 MSRs.
1416 */
1417 case MSR_K6_EFER:
1418 {
1419 PVM pVM = pVCpu->CTX_SUFF(pVM);
1420 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1421 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1422 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1423 : 0;
1424 uint64_t fMask = 0;
1425
1426 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
1427 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1428 fMask |= MSR_K6_EFER_NXE;
1429 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1430 fMask |= MSR_K6_EFER_LME;
1431 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1432 fMask |= MSR_K6_EFER_SCE;
1433 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1434 fMask |= MSR_K6_EFER_FFXSR;
1435
1436 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1437 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1438 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1439 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1440 {
1441 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1442 return VERR_CPUM_RAISE_GP_0;
1443 }
1444
1445 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1446 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1447 ("Unexpected value %RX64\n", uValue));
1448 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1449
1450 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1451 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1452 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1453 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1454 {
1455 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1456 HMFlushTLB(pVCpu);
1457
1458 /* Notify PGM about NXE changes. */
1459 if ( (uOldEFER & MSR_K6_EFER_NXE)
1460 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1461 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1462 }
1463 break;
1464 }
1465
1466 case MSR_K8_SF_MASK:
1467 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1468 break;
1469
1470 case MSR_K6_STAR:
1471 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1472 break;
1473
1474 case MSR_K8_LSTAR:
1475 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1476 break;
1477
1478 case MSR_K8_CSTAR:
1479 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1480 break;
1481
1482 case MSR_K8_FS_BASE:
1483 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1484 break;
1485
1486 case MSR_K8_GS_BASE:
1487 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1488 break;
1489
1490 case MSR_K8_KERNEL_GS_BASE:
1491 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1492 break;
1493
1494 case MSR_K8_TSC_AUX:
1495 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1496 break;
1497
1498 case MSR_IA32_DEBUGCTL:
1499 /** @todo virtualize DEBUGCTL and relatives */
1500 break;
1501
1502 /*
1503 * Intel-specific MSRs:
1504 */
1505 /*case MSR_IA32_PLATFORM_ID: - read-only */
1506 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1507 case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */
1508 /*case MSR_IA32_MCP_CAP: - read-only */
1509 /*case MSR_IA32_MCG_STATUS: - read-only */
1510 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1511 /*case MSR_IA32_MC0_CTL: - read-only? */
1512 /*case MSR_IA32_MC0_STATUS: - read-only? */
1513 case MSR_PKG_CST_CONFIG_CONTROL:
1514 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1515 {
1516 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1517 return VERR_CPUM_RAISE_GP_0;
1518 }
1519
1520 switch (idMsr)
1521 {
1522 case MSR_PKG_CST_CONFIG_CONTROL:
1523 {
1524 if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15))
1525 {
1526 Log(("MSR_PKG_CST_CONFIG_CONTROL: Write protected -> #GP\n"));
1527 return VERR_CPUM_RAISE_GP_0;
1528 }
1529 static uint64_t s_fMask = UINT64_C(0x01f08407); /** @todo Only Nehalem has 24; Only Sandy has 27 and 28. */
1530 static uint64_t s_fGpInvalid = UINT64_C(0xffffffff00ff0000); /** @todo figure out exactly what's off limits. */
1531 if ((uValue & s_fGpInvalid) || (uValue & 7) >= 5)
1532 {
1533 Log(("MSR_PKG_CST_CONFIG_CONTROL: Invalid value %#llx -> #GP\n", uValue));
1534 return VERR_CPUM_RAISE_GP_0;
1535 }
1536 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue & s_fMask;
1537 break;
1538 }
1539
1540 }
1541 /* ignored */
1542 break;
1543
1544 /*
1545 * AMD specific MSRs:
1546 */
1547 case MSR_K8_SYSCFG: /** @todo can be written, but we ignore that for now. */
1548 case MSR_K8_INT_PENDING: /** @todo can be written, but we ignore that for now. */
1549 case MSR_K8_NB_CFG: /** @todo can be written; the apicid swapping might be used and would need saving, but probably unnecessary. */
1550 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1551 case 0xc0010042: /* quick fix for something. */
1552 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1553 case 0xc0011004: /* quick fix for the opposition. */
1554 case 0xc0011005: /* quick fix for the opposition. */
1555 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1556 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1557 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1558 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1559 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1560 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1561 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1562 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1563 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1564 {
1565 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1566 return VERR_CPUM_RAISE_GP_0;
1567 }
1568 /* ignored */
1569 break;
1570
1571
1572 default:
1573 /*
1574 * Hand the X2APIC range to PDM and the APIC.
1575 */
1576 if ( idMsr >= MSR_IA32_X2APIC_START
1577 && idMsr <= MSR_IA32_X2APIC_END)
1578 {
1579 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1580 if (rc != VINF_SUCCESS)
1581 rc = VERR_CPUM_RAISE_GP_0;
1582 }
1583 else
1584 {
1585 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1586 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1587 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1588 }
1589 break;
1590 }
1591 return rc;
1592}
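The WRMSR-side companion sketch, under the same assumptions (raiseGp0() is hypothetical; EDX:EAX is combined per the instruction's register convention).

static int ExampleEmulateWrMsr(PVMCPU pVCpu, uint32_t idMsr, uint32_t uEax, uint32_t uEdx)
{
    uint64_t const uValue = ((uint64_t)uEdx << 32) | uEax;
    int rc = CPUMSetGuestMsr(pVCpu, idMsr, uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return raiseGp0(pVCpu);        /* hypothetical #GP(0) injection helper */
    return rc;
}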
1593
1594
1595VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1596{
1597 if (pcbLimit)
1598 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1599 return pVCpu->cpum.s.Guest.idtr.pIdt;
1600}
1601
1602
1603VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1604{
1605 if (pHidden)
1606 *pHidden = pVCpu->cpum.s.Guest.tr;
1607 return pVCpu->cpum.s.Guest.tr.Sel;
1608}
1609
1610
1611VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1612{
1613 return pVCpu->cpum.s.Guest.cs.Sel;
1614}
1615
1616
1617VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1618{
1619 return pVCpu->cpum.s.Guest.ds.Sel;
1620}
1621
1622
1623VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1624{
1625 return pVCpu->cpum.s.Guest.es.Sel;
1626}
1627
1628
1629VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1630{
1631 return pVCpu->cpum.s.Guest.fs.Sel;
1632}
1633
1634
1635VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1636{
1637 return pVCpu->cpum.s.Guest.gs.Sel;
1638}
1639
1640
1641VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1642{
1643 return pVCpu->cpum.s.Guest.ss.Sel;
1644}
1645
1646
1647VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1648{
1649 return pVCpu->cpum.s.Guest.ldtr.Sel;
1650}
1651
1652
1653VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
1654{
1655 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1656 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1657 return pVCpu->cpum.s.Guest.ldtr.Sel;
1658}
1659
1660
1661VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1662{
1663 return pVCpu->cpum.s.Guest.cr0;
1664}
1665
1666
1667VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1668{
1669 return pVCpu->cpum.s.Guest.cr2;
1670}
1671
1672
1673VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1674{
1675 return pVCpu->cpum.s.Guest.cr3;
1676}
1677
1678
1679VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1680{
1681 return pVCpu->cpum.s.Guest.cr4;
1682}
1683
1684
1685VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1686{
1687 uint64_t u64;
1688 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1689 if (RT_FAILURE(rc))
1690 u64 = 0;
1691 return u64;
1692}
1693
1694
1695VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1696{
1697 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1698}
1699
1700
1701VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1702{
1703 return pVCpu->cpum.s.Guest.eip;
1704}
1705
1706
1707VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1708{
1709 return pVCpu->cpum.s.Guest.rip;
1710}
1711
1712
1713VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1714{
1715 return pVCpu->cpum.s.Guest.eax;
1716}
1717
1718
1719VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1720{
1721 return pVCpu->cpum.s.Guest.ebx;
1722}
1723
1724
1725VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1726{
1727 return pVCpu->cpum.s.Guest.ecx;
1728}
1729
1730
1731VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1732{
1733 return pVCpu->cpum.s.Guest.edx;
1734}
1735
1736
1737VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1738{
1739 return pVCpu->cpum.s.Guest.esi;
1740}
1741
1742
1743VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1744{
1745 return pVCpu->cpum.s.Guest.edi;
1746}
1747
1748
1749VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1750{
1751 return pVCpu->cpum.s.Guest.esp;
1752}
1753
1754
1755VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1756{
1757 return pVCpu->cpum.s.Guest.ebp;
1758}
1759
1760
1761VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1762{
1763 return pVCpu->cpum.s.Guest.eflags.u32;
1764}
1765
1766
1767VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1768{
1769 switch (iReg)
1770 {
1771 case DISCREG_CR0:
1772 *pValue = pVCpu->cpum.s.Guest.cr0;
1773 break;
1774
1775 case DISCREG_CR2:
1776 *pValue = pVCpu->cpum.s.Guest.cr2;
1777 break;
1778
1779 case DISCREG_CR3:
1780 *pValue = pVCpu->cpum.s.Guest.cr3;
1781 break;
1782
1783 case DISCREG_CR4:
1784 *pValue = pVCpu->cpum.s.Guest.cr4;
1785 break;
1786
1787 case DISCREG_CR8:
1788 {
1789 uint8_t u8Tpr;
1790 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1791 if (RT_FAILURE(rc))
1792 {
1793 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1794 *pValue = 0;
1795 return rc;
1796 }
1797 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes in CR8; bits 3-0 are ignored. */
1798 break;
1799 }
1800
1801 default:
1802 return VERR_INVALID_PARAMETER;
1803 }
1804 return VINF_SUCCESS;
1805}
1806
1807
1808VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1809{
1810 return pVCpu->cpum.s.Guest.dr[0];
1811}
1812
1813
1814VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1815{
1816 return pVCpu->cpum.s.Guest.dr[1];
1817}
1818
1819
1820VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1821{
1822 return pVCpu->cpum.s.Guest.dr[2];
1823}
1824
1825
1826VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1827{
1828 return pVCpu->cpum.s.Guest.dr[3];
1829}
1830
1831
1832VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1833{
1834 return pVCpu->cpum.s.Guest.dr[6];
1835}
1836
1837
1838VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1839{
1840 return pVCpu->cpum.s.Guest.dr[7];
1841}
1842
1843
1844VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1845{
1846 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1847 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1848 if (iReg == 4 || iReg == 5)
1849 iReg += 2;
1850 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1851 return VINF_SUCCESS;
1852}
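A quick illustration of the DR4/DR6 aliasing implemented above; both reads hit the same underlying register.

static void ExampleDr4AliasesDr6(PVMCPU pVCpu)
{
    uint64_t uDr6, uDr4;
    CPUMGetGuestDRx(pVCpu, 6, &uDr6);
    CPUMGetGuestDRx(pVCpu, 4, &uDr4);  /* remapped to DR6 internally */
    Assert(uDr6 == uDr4);
}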
1853
1854
1855VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1856{
1857 return pVCpu->cpum.s.Guest.msrEFER;
1858}
1859
1860
1861/**
1862 * Gets a CPUID leaf.
1863 *
1864 * @param pVCpu Pointer to the VMCPU.
1865 * @param iLeaf The CPUID leaf to get.
1866 * @param pEax Where to store the EAX value.
1867 * @param pEbx Where to store the EBX value.
1868 * @param pEcx Where to store the ECX value.
1869 * @param pEdx Where to store the EDX value.
1870 */
1871VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1872{
1873 PVM pVM = pVCpu->CTX_SUFF(pVM);
1874
1875 PCCPUMCPUID pCpuId;
1876 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1877 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1878 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1879 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1880 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1881 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1882 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1883 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1884 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1885 else
1886 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1887
1888 uint32_t cCurrentCacheIndex = *pEcx;
1889
1890 *pEax = pCpuId->eax;
1891 *pEbx = pCpuId->ebx;
1892 *pEcx = pCpuId->ecx;
1893 *pEdx = pCpuId->edx;
1894
1895 if ( iLeaf == 1)
1896 {
1897 /* Bits 31-24: Initial APIC ID */
1898 Assert(pVCpu->idCpu <= 255);
1899 *pEbx |= (pVCpu->idCpu << 24);
1900 }
1901
1902 if ( iLeaf == 4
1903 && cCurrentCacheIndex < 3
1904 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1905 {
1906 uint32_t type, level, sharing, linesize,
1907 partitions, associativity, sets, cores;
1908
1909 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1910 partitions = 1;
1911 /* These are only to shut up the compiler; they will always
1912 get overwritten, and the compiler should be able to figure that out. */
1913 sets = associativity = sharing = level = 1;
1914 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1915 switch (cCurrentCacheIndex)
1916 {
1917 case 0:
1918 type = 1;
1919 level = 1;
1920 sharing = 1;
1921 linesize = 64;
1922 associativity = 8;
1923 sets = 64;
1924 break;
1925 case 1:
1926 level = 1;
1927 type = 2;
1928 sharing = 1;
1929 linesize = 64;
1930 associativity = 8;
1931 sets = 64;
1932 break;
1933 default: /* shut up gcc.*/
1934 AssertFailed();
1935 case 2:
1936 level = 2;
1937 type = 3;
1938 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1939 linesize = 64;
1940 associativity = 24;
1941 sets = 4096;
1942 break;
1943 }
1944
1945 NOREF(type);
1946 *pEax |= ((cores - 1) << 26) |
1947 ((sharing - 1) << 14) |
1948 (level << 5) |
1949 1;
1950 *pEbx = (linesize - 1) |
1951 ((partitions - 1) << 12) |
1952 ((associativity - 1) << 22); /* -1 encoding */
1953 *pEcx = sets - 1;
1954 }
1955
1956 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1957}
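A hedged usage sketch for the ECX-as-input behaviour of leaf 4 above: the cache index must be preloaded into the ECX slot before the call.

static void ExampleQueryGuestCacheLevel(PVMCPU pVCpu, uint32_t iCacheIndex)
{
    uint32_t uEax, uEbx, uEdx;
    uint32_t uEcx = iCacheIndex;       /* leaf 4 reads *pEcx as the cache index */
    CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);
    /* On return uEcx holds "number of sets - 1" for the selected cache. */
}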
1958
1959/**
1960 * Gets a number of standard CPUID leafs.
1961 *
1962 * @returns Number of leafs.
1963 * @param pVM Pointer to the VM.
1964 * @remark Intended for PATM.
1965 */
1966VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1967{
1968 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1969}
1970
1971
1972/**
1973 * Gets a number of extended CPUID leafs.
1974 *
1975 * @returns Number of leafs.
1976 * @param pVM Pointer to the VM.
1977 * @remark Intended for PATM.
1978 */
1979VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1980{
1981 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1982}
1983
1984
1985/**
1986 * Gets a number of centaur CPUID leafs.
1987 *
1988 * @returns Number of leafs.
1989 * @param pVM Pointer to the VM.
1990 * @remark Intended for PATM.
1991 */
1992VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1993{
1994 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1995}
1996
1997
1998/**
1999 * Sets a CPUID feature bit.
2000 *
2001 * @param pVM Pointer to the VM.
2002 * @param enmFeature The feature to set.
2003 */
2004VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2005{
2006 switch (enmFeature)
2007 {
2008 /*
2009 * Set the APIC bit in both feature masks.
2010 */
2011 case CPUMCPUIDFEATURE_APIC:
2012 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2013 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
2014 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2015 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2016 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
2017 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
2018 break;
2019
2020 /*
2021 * Set the x2APIC bit in the standard feature mask.
2022 */
2023 case CPUMCPUIDFEATURE_X2APIC:
2024 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2025 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
2026 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
2027 break;
2028
2029 /*
2030 * Set the sysenter/sysexit bit in the standard feature mask.
2031 * Assumes the caller knows what it's doing! (host must support these)
2032 */
2033 case CPUMCPUIDFEATURE_SEP:
2034 {
2035 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
2036 {
2037 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
2038 return;
2039 }
2040
2041 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2042 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
2043 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
2044 break;
2045 }
2046
2047 /*
2048 * Set the syscall/sysret bit in the extended feature mask.
2049 * Assumes the caller knows what it's doing! (host must support these)
2050 */
2051 case CPUMCPUIDFEATURE_SYSCALL:
2052 {
2053 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2054 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2055 {
2056#if HC_ARCH_BITS == 32
 2057 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL does not appear to be set in 32-bit
 2058 * host mode, even when the CPU is capable of it in 64-bit mode.
2059 */
2060 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2061 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
2062 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2063#endif
2064 {
2065 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
2066 return;
2067 }
2068 }
 2069 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
2070 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
2071 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
2072 break;
2073 }
2074
2075 /*
2076 * Set the PAE bit in both feature masks.
2077 * Assumes the caller knows what it's doing! (host must support these)
2078 */
2079 case CPUMCPUIDFEATURE_PAE:
2080 {
2081 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
2082 {
2083 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
2084 return;
2085 }
2086
2087 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2088 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
2089 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2090 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2091 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
2092 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
2093 break;
2094 }
2095
2096 /*
2097 * Set the LONG MODE bit in the extended feature mask.
2098 * Assumes the caller knows what it's doing! (host must support these)
2099 */
2100 case CPUMCPUIDFEATURE_LONG_MODE:
2101 {
2102 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2103 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2104 {
2105 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
2106 return;
2107 }
2108
2109 /* Valid for both Intel and AMD. */
2110 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2111 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
2112 break;
2113 }
2114
2115 /*
2116 * Set the NX/XD bit in the extended feature mask.
2117 * Assumes the caller knows what it's doing! (host must support these)
2118 */
2119 case CPUMCPUIDFEATURE_NX:
2120 {
2121 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2122 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
2123 {
2124 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
2125 return;
2126 }
2127
2128 /* Valid for both Intel and AMD. */
2129 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
2130 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
2131 break;
2132 }
2133
2134 /*
2135 * Set the LAHF/SAHF support in 64-bit mode.
2136 * Assumes the caller knows what it's doing! (host must support this)
2137 */
2138 case CPUMCPUIDFEATURE_LAHF:
2139 {
2140 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2141 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
2142 {
2143 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
2144 return;
2145 }
2146
2147 /* Valid for both Intel and AMD. */
2148 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2149 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
2150 break;
2151 }
2152
2153 case CPUMCPUIDFEATURE_PAT:
2154 {
2155 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2156 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
2157 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2158 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2159 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
2160 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
2161 break;
2162 }
2163
2164 /*
2165 * Set the RDTSCP support bit.
2166 * Assumes the caller knows what it's doing! (host must support this)
2167 */
2168 case CPUMCPUIDFEATURE_RDTSCP:
2169 {
2170 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2171 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
2172 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
2173 {
2174 if (!pVM->cpum.s.u8PortableCpuIdLevel)
2175 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
2176 return;
2177 }
2178
2179 /* Valid for both Intel and AMD. */
2180 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2181 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
2182 break;
2183 }
2184
2185 /*
2186 * Set the Hypervisor Present bit in the standard feature mask.
2187 */
2188 case CPUMCPUIDFEATURE_HVP:
2189 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2190 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
2191 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
2192 break;
2193
2194 default:
2195 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2196 break;
2197 }
2198 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2199 {
2200 PVMCPU pVCpu = &pVM->aCpus[i];
2201 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2202 }
2203}
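 /* A minimal usage sketch (not part of the original sources): ring-3 init
    code could mirror the host's NX capability into the guest like this,
    assuming pVM is a fully constructed VM and fHostHasNx was derived from
    the host CPUID earlier (both the flag name and the call site are
    hypothetical):

        if (fHostHasNx)
            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
        else
            CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);

    Note that CPUMSetGuestCpuIdFeature() itself refuses to enable NX when
    the host lacks it, so the call is safe either way. */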
2204
2205
2206/**
2207 * Queries a CPUID feature bit.
2208 *
 2209 * @returns true if the feature is present, false if not.
2210 * @param pVM Pointer to the VM.
2211 * @param enmFeature The feature to query.
2212 */
2213VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2214{
2215 switch (enmFeature)
2216 {
2217 case CPUMCPUIDFEATURE_PAE:
2218 {
2219 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2220 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
2221 break;
2222 }
2223
 2224 case CPUMCPUIDFEATURE_NX:
 2225 {
 2226 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
 2227 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
 break; /* Don't fall through into the SYSCALL case below. */
 2228 }
 2229
 2230 case CPUMCPUIDFEATURE_SYSCALL:
 2231 {
 2232 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
 2233 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
 break;
 2234 }
2235
2236 case CPUMCPUIDFEATURE_RDTSCP:
2237 {
2238 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2239 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2240 break;
2241 }
2242
2243 case CPUMCPUIDFEATURE_LONG_MODE:
2244 {
2245 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2246 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
2247 break;
2248 }
2249
2250 default:
2251 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2252 break;
2253 }
2254 return false;
2255}
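 /* Illustrative query (hypothetical caller): checking whether long mode is
    exposed to the guest before attempting to start a 64-bit guest:

        if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE))
            return VERR_NOT_SUPPORTED;  // hypothetical error handling
 */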
2256
2257
2258/**
2259 * Clears a CPUID feature bit.
2260 *
2261 * @param pVM Pointer to the VM.
2262 * @param enmFeature The feature to clear.
2263 */
2264VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2265{
2266 switch (enmFeature)
2267 {
2268 /*
 2269 * Clear the APIC bit in both feature masks.
2270 */
2271 case CPUMCPUIDFEATURE_APIC:
2272 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2273 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
2274 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2275 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2276 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2277 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
2278 break;
2279
2280 /*
2281 * Clear the x2APIC bit in the standard feature mask.
2282 */
2283 case CPUMCPUIDFEATURE_X2APIC:
2284 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2285 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2286 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
2287 break;
2288
2289 case CPUMCPUIDFEATURE_PAE:
2290 {
2291 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2292 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
2293 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2294 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2295 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2296 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
2297 break;
2298 }
2299
2300 case CPUMCPUIDFEATURE_PAT:
2301 {
2302 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2303 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
2304 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2305 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2306 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2307 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
2308 break;
2309 }
2310
2311 case CPUMCPUIDFEATURE_LONG_MODE:
2312 {
2313 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2314 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2315 break;
2316 }
2317
2318 case CPUMCPUIDFEATURE_LAHF:
2319 {
2320 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2321 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2322 break;
2323 }
2324
2325 case CPUMCPUIDFEATURE_RDTSCP:
2326 {
2327 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2328 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2329 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2330 break;
2331 }
2332
2333 case CPUMCPUIDFEATURE_HVP:
2334 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2335 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
2336 break;
2337
2338 default:
2339 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2340 break;
2341 }
2342 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2343 {
2344 PVMCPU pVCpu = &pVM->aCpus[i];
2345 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2346 }
2347}
2348
2349
2350/**
2351 * Gets the host CPU vendor.
2352 *
2353 * @returns CPU vendor.
2354 * @param pVM Pointer to the VM.
2355 */
2356VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2357{
2358 return pVM->cpum.s.enmHostCpuVendor;
2359}
2360
2361
2362/**
 2363 * Gets the guest CPU vendor.
2364 *
2365 * @returns CPU vendor.
2366 * @param pVM Pointer to the VM.
2367 */
2368VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2369{
2370 return pVM->cpum.s.enmGuestCpuVendor;
2371}
2372
2373
2374VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2375{
2376 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2377 return CPUMRecalcHyperDRx(pVCpu, 0, false);
2378}
2379
2380
2381VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2382{
2383 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2384 return CPUMRecalcHyperDRx(pVCpu, 1, false);
2385}
2386
2387
2388VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2389{
2390 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2391 return CPUMRecalcHyperDRx(pVCpu, 2, false);
2392}
2393
2394
2395VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2396{
2397 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2398 return CPUMRecalcHyperDRx(pVCpu, 3, false);
2399}
2400
2401
2402VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2403{
2404 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2405 return VINF_SUCCESS; /* No need to recalc. */
2406}
2407
2408
2409VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2410{
2411 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2412 return CPUMRecalcHyperDRx(pVCpu, 7, false);
2413}
2414
2415
2416VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2417{
2418 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2419 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2420 if (iReg == 4 || iReg == 5)
2421 iReg += 2;
2422 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2423 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
2424}
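 /* Example of the DR4/DR5 aliasing handled above (illustrative only): a
    guest MOV DR5, reg is routed to DR7, so these two calls are equivalent:

        CPUMSetGuestDRx(pVCpu, 5, uValue);   // iReg 5 is remapped to 7
        CPUMSetGuestDR7(pVCpu, uValue);

    This matches the CR4.DE=0 behaviour; with CR4.DE=1 the CPU would raise
    #UD instead, which would have to be dealt with by whatever emulates the
    MOV DRx instruction, not here. */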
2425
2426
2427/**
2428 * Recalculates the hypervisor DRx register values based on current guest
2429 * registers and DBGF breakpoints, updating changed registers depending on the
2430 * context.
2431 *
2432 * This is called whenever a guest DRx register is modified (any context) and
2433 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
2434 *
 2435 * In raw-mode context this function will reload any (hyper) DRx registers that
 2436 * come out with a different value. It may also have to save the host debug
 2437 * registers if that hasn't been done already. In this context though, we'll
 2438 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
 2439 * are only important when breakpoints are actually enabled.
 2440 *
 2441 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
 2442 * reloaded by the HM code if it changes. Furthermore, we will only use the
 2443 * combined register set when the VBox debugger is actually using hardware BPs;
 2444 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
 2445 * concern us here).
 2446 *
 2447 * In ring-3 we won't be loading anything, so we'll calculate the hypervisor
 2448 * values all the time.
2449 *
2450 * @returns VINF_SUCCESS.
2451 * @param pVCpu Pointer to the VMCPU.
2452 * @param iGstReg The guest debug register number that was modified.
 2453 * UINT8_MAX if no guest register was modified.
2454 * @param fForceHyper Used in HM to force hyper registers because of single
2455 * stepping.
2456 */
2457VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
2458{
2459 PVM pVM = pVCpu->CTX_SUFF(pVM);
2460
2461 /*
2462 * Compare the DR7s first.
2463 *
 2464 * We only care about the enabled flags. GD is virtualized when we
 2465 * dispatch the #DB; we never enable it. The DBGF DR7 value will
 2466 * always have the LE and GE bits set, so there is no need to check
 2467 * and disable things when they're clear, as we have to for the guest DR7.
2468 */
2469 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2470 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
2471 uGstDr7 = 0;
2472 else if (!(uGstDr7 & X86_DR7_LE))
2473 uGstDr7 &= ~X86_DR7_LE_ALL;
2474 else if (!(uGstDr7 & X86_DR7_GE))
2475 uGstDr7 &= ~X86_DR7_GE_ALL;
2476
2477 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2478
2479#ifdef IN_RING0
2480 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
2481 fForceHyper = true;
2482#endif
2483 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
2484 {
2485 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2486#ifdef IN_RC
2487 bool const fHmEnabled = false;
2488#elif defined(IN_RING3)
2489 bool const fHmEnabled = HMIsEnabled(pVM);
2490#endif
2491
2492 /*
2493 * Ok, something is enabled. Recalc each of the breakpoints, taking
 2494 * the VM debugger ones over the guest ones. In raw-mode context we will
2495 * not allow breakpoints with values inside the hypervisor area.
2496 */
2497 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
2498
2499 /* bp 0 */
2500 RTGCUINTREG uNewDr0;
2501 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2502 {
2503 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2504 uNewDr0 = DBGFBpGetDR0(pVM);
2505 }
2506 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2507 {
2508 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2509#ifndef IN_RING0
 2510 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
2511 uNewDr0 = 0;
2512 else
2513#endif
2514 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2515 }
2516 else
2517 uNewDr0 = 0;
2518
2519 /* bp 1 */
2520 RTGCUINTREG uNewDr1;
2521 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2522 {
2523 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2524 uNewDr1 = DBGFBpGetDR1(pVM);
2525 }
2526 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2527 {
2528 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2529#ifndef IN_RING0
 2530 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
2531 uNewDr1 = 0;
2532 else
2533#endif
2534 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2535 }
2536 else
2537 uNewDr1 = 0;
2538
2539 /* bp 2 */
2540 RTGCUINTREG uNewDr2;
2541 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2542 {
2543 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2544 uNewDr2 = DBGFBpGetDR2(pVM);
2545 }
2546 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2547 {
2548 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2549#ifndef IN_RING0
 2550 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2551 uNewDr2 = 0;
2552 else
2553#endif
2554 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2555 }
2556 else
2557 uNewDr2 = 0;
2558
2559 /* bp 3 */
2560 RTGCUINTREG uNewDr3;
2561 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2562 {
2563 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2564 uNewDr3 = DBGFBpGetDR3(pVM);
2565 }
2566 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2567 {
2568 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2569#ifndef IN_RING0
 2570 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2571 uNewDr3 = 0;
2572 else
2573#endif
2574 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2575 }
2576 else
2577 uNewDr3 = 0;
2578
2579 /*
2580 * Apply the updates.
2581 */
2582#ifdef IN_RC
2583 /* Make sure to save host registers first. */
2584 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2585 {
2586 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2587 {
2588 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2589 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2590 }
2591 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2592 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2593 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2594 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2595 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2596
2597 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2598 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2599 ASMSetDR0(uNewDr0);
2600 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2601 ASMSetDR1(uNewDr1);
2602 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2603 ASMSetDR2(uNewDr2);
2604 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2605 ASMSetDR3(uNewDr3);
2606 ASMSetDR6(X86_DR6_INIT_VAL);
2607 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2608 ASMSetDR7(uNewDr7);
2609 }
2610 else
2611#endif
2612 {
2613 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2614 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2615 CPUMSetHyperDR3(pVCpu, uNewDr3);
2616 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2617 CPUMSetHyperDR2(pVCpu, uNewDr2);
2618 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2619 CPUMSetHyperDR1(pVCpu, uNewDr1);
2620 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2621 CPUMSetHyperDR0(pVCpu, uNewDr0);
2622 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2623 CPUMSetHyperDR7(pVCpu, uNewDr7);
2624 }
2625 }
2626#ifdef IN_RING0
2627 else if (CPUMIsGuestDebugStateActive(pVCpu))
2628 {
2629 /*
2630 * Reload the register that was modified. Normally this won't happen
 2631 * as we won't intercept DRx writes when the hyper debug state isn't
 2632 * loaded, but in case we do for some reason we'll simply deal
2633 * with it.
2634 */
2635 switch (iGstReg)
2636 {
2637 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2638 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2639 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2640 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2641 default:
2642 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2643 }
2644 }
2645#endif
2646 else
2647 {
2648 /*
2649 * No active debug state any more. In raw-mode this means we have to
2650 * make sure DR7 has everything disabled now, if we armed it already.
2651 * In ring-0 we might end up here when just single stepping.
2652 */
2653#if defined(IN_RC) || defined(IN_RING0)
2654 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2655 {
2656# ifdef IN_RC
2657 ASMSetDR7(X86_DR7_INIT_VAL);
2658# endif
2659 if (pVCpu->cpum.s.Hyper.dr[0])
2660 ASMSetDR0(0);
2661 if (pVCpu->cpum.s.Hyper.dr[1])
2662 ASMSetDR1(0);
2663 if (pVCpu->cpum.s.Hyper.dr[2])
2664 ASMSetDR2(0);
2665 if (pVCpu->cpum.s.Hyper.dr[3])
2666 ASMSetDR3(0);
2667 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2668 }
2669#endif
2670 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2671
2672 /* Clear all the registers. */
2673 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2674 pVCpu->cpum.s.Hyper.dr[3] = 0;
2675 pVCpu->cpum.s.Hyper.dr[2] = 0;
2676 pVCpu->cpum.s.Hyper.dr[1] = 0;
2677 pVCpu->cpum.s.Hyper.dr[0] = 0;
2678
2679 }
2680 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2681 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2682 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2683 pVCpu->cpum.s.Hyper.dr[7]));
2684
2685 return VINF_SUCCESS;
2686}
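 /* The per-breakpoint merge above in a nutshell (sketch for slot 0; the
    other three slots are handled identically):

        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))        // debugger owns bp 0
            uNewDr0 = DBGFBpGetDR0(pVM);
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))    // guest owns bp 0
            uNewDr0 = CPUMGetGuestDR0(pVCpu);
        else                                             // slot unused
            uNewDr0 = 0;

    DBGF breakpoints always take precedence over guest breakpoints that use
    the same slot. */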
2687
2688
2689/**
2690 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2691 *
 2692 * @returns true if NXE is enabled, otherwise false.
2693 * @param pVCpu Pointer to the VMCPU.
2694 */
2695VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2696{
2697 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2698}
2699
2700
2701/**
2702 * Tests if the guest has the Page Size Extension enabled (PSE).
2703 *
 2704 * @returns true if PSE is enabled, otherwise false.
2705 * @param pVCpu Pointer to the VMCPU.
2706 */
2707VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2708{
2709 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2710 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2711}
2712
2713
2714/**
2715 * Tests if the guest has the paging enabled (PG).
2716 *
 2717 * @returns true if paging is enabled, otherwise false.
2718 * @param pVCpu Pointer to the VMCPU.
2719 */
2720VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2721{
2722 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2723}
2724
2725
2726/**
 2727 * Tests if the guest has write protection for ring-0 enabled (CR0.WP).
 2728 *
 2729 * @returns true if CR0.WP is set, otherwise false.
2730 * @param pVCpu Pointer to the VMCPU.
2731 */
2732VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2733{
2734 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2735}
2736
2737
2738/**
2739 * Tests if the guest is running in real mode or not.
2740 *
2741 * @returns true if in real mode, otherwise false.
2742 * @param pVCpu Pointer to the VMCPU.
2743 */
2744VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2745{
2746 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2747}
2748
2749
2750/**
2751 * Tests if the guest is running in real or virtual 8086 mode.
2752 *
2753 * @returns @c true if it is, @c false if not.
2754 * @param pVCpu Pointer to the VMCPU.
2755 */
2756VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2757{
2758 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2759 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2760}
2761
2762
2763/**
 2764 * Tests if the guest is running in protected mode or not.
2765 *
2766 * @returns true if in protected mode, otherwise false.
2767 * @param pVCpu Pointer to the VMCPU.
2768 */
2769VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2770{
2771 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2772}
2773
2774
2775/**
 2776 * Tests if the guest is running in paged protected mode or not.
2777 *
2778 * @returns true if in paged protected mode, otherwise false.
2779 * @param pVCpu Pointer to the VMCPU.
2780 */
2781VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2782{
2783 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2784}
2785
2786
2787/**
2788 * Tests if the guest is running in long mode or not.
2789 *
2790 * @returns true if in long mode, otherwise false.
2791 * @param pVCpu Pointer to the VMCPU.
2792 */
2793VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2794{
2795 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2796}
2797
2798
2799/**
2800 * Tests if the guest is running in PAE mode or not.
2801 *
2802 * @returns true if in PAE mode, otherwise false.
2803 * @param pVCpu Pointer to the VMCPU.
2804 */
2805VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2806{
2807 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2808 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2809 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
2810}
2811
2812
2813/**
 2814 * Tests if the guest is running in 64-bit mode or not.
 2815 *
 2816 * @returns true if in 64-bit protected mode, otherwise false.
2817 * @param pVCpu The current virtual CPU.
2818 */
2819VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2820{
2821 if (!CPUMIsGuestInLongMode(pVCpu))
2822 return false;
2823 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2824 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2825}
2826
2827
2828/**
2829 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2830 * registers.
2831 *
 2832 * @returns true if in 64-bit protected mode, otherwise false.
2833 * @param pCtx Pointer to the current guest CPU context.
2834 */
2835VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2836{
2837 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2838}
2839
2840#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2841
2842/**
 2843 * Checks whether raw-mode ring compression is currently in effect.
 *
2844 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2845 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2846 * @param pVCpu The current virtual CPU.
2847 */
2848VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2849{
2850 return pVCpu->cpum.s.fRawEntered;
2851}
2852
2853/**
2854 * Transforms the guest CPU state to raw-ring mode.
2855 *
 2856 * This function will change any CS and SS selectors with RPL=0 to RPL=1.
 2857 *
 2858 * @returns VBox status code. (recompiler failure)
2859 * @param pVCpu Pointer to the VMCPU.
2860 * @param pCtxCore The context core (for trap usage).
2861 * @see @ref pg_raw
2862 */
2863VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2864{
2865 PVM pVM = pVCpu->CTX_SUFF(pVM);
2866
2867 Assert(!pVCpu->cpum.s.fRawEntered);
2868 Assert(!pVCpu->cpum.s.fRemEntered);
2869 if (!pCtxCore)
2870 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2871
2872 /*
2873 * Are we in Ring-0?
2874 */
2875 if ( pCtxCore->ss.Sel
2876 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2877 && !pCtxCore->eflags.Bits.u1VM)
2878 {
2879 /*
2880 * Enter execution mode.
2881 */
2882 PATMRawEnter(pVM, pCtxCore);
2883
2884 /*
2885 * Set CPL to Ring-1.
2886 */
2887 pCtxCore->ss.Sel |= 1;
2888 if ( pCtxCore->cs.Sel
2889 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
2890 pCtxCore->cs.Sel |= 1;
2891 }
2892 else
2893 {
2894# ifdef VBOX_WITH_RAW_RING1
2895 if ( EMIsRawRing1Enabled(pVM)
2896 && !pCtxCore->eflags.Bits.u1VM
2897 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
2898 {
2899 /* Set CPL to Ring-2. */
2900 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
2901 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2902 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
2903 }
2904# else
2905 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
2906 ("ring-1 code not supported\n"));
2907# endif
2908 /*
2909 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2910 */
2911 PATMRawEnter(pVM, pCtxCore);
2912 }
2913
2914 /*
2915 * Assert sanity.
2916 */
2917 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2918 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
2919 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2920 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2921
2922 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
2923
2924 pVCpu->cpum.s.fRawEntered = true;
2925 return VINF_SUCCESS;
2926}
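 /* Worked example of the ring compression performed above: guest ring-0
    code with CS=0x0008 and SS=0x0010 (RPL=0) enters raw-mode with CS=0x0009
    and SS=0x0011 (RPL=1); CPUMRawLeave() strips the RPL again on the way
    out. Guest ring-3 selectors (RPL=3) pass through unchanged. */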
2927
2928
2929/**
2930 * Transforms the guest CPU state from raw-ring mode to correct values.
2931 *
 2932 * This function will change any selector registers with RPL=1 back to RPL=0.
2933 *
2934 * @returns Adjusted rc.
2935 * @param pVCpu Pointer to the VMCPU.
2936 * @param rc Raw mode return code
2937 * @param pCtxCore The context core (for trap usage).
2938 * @see @ref pg_raw
2939 */
2940VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
2941{
2942 PVM pVM = pVCpu->CTX_SUFF(pVM);
2943
2944 /*
2945 * Don't leave if we've already left (in RC).
2946 */
2947 Assert(!pVCpu->cpum.s.fRemEntered);
2948 if (!pVCpu->cpum.s.fRawEntered)
2949 return rc;
2950 pVCpu->cpum.s.fRawEntered = false;
2951
2952 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2953 if (!pCtxCore)
2954 pCtxCore = CPUMCTX2CORE(pCtx);
2955 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
2956 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
2957 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2958
2959 /*
2960 * Are we executing in raw ring-1?
2961 */
2962 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
2963 && !pCtxCore->eflags.Bits.u1VM)
2964 {
2965 /*
2966 * Leave execution mode.
2967 */
2968 PATMRawLeave(pVM, pCtxCore, rc);
2969 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2970 /** @todo See what happens if we remove this. */
2971 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2972 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2973 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2974 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2975 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2976 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2977 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2978 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2979
2980 /*
2981 * Ring-1 selector => Ring-0.
2982 */
2983 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
2984 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2985 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
2986 }
2987 else
2988 {
2989 /*
2990 * PATM is taking care of the IOPL and IF flags for us.
2991 */
2992 PATMRawLeave(pVM, pCtxCore, rc);
2993 if (!pCtxCore->eflags.Bits.u1VM)
2994 {
2995# ifdef VBOX_WITH_RAW_RING1
2996 if ( EMIsRawRing1Enabled(pVM)
2997 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
2998 {
2999 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
3000 /** @todo See what happens if we remove this. */
3001 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
3002 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
3003 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
3004 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
3005 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
3006 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
3007 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
3008 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
3009
3010 /*
3011 * Ring-2 selector => Ring-1.
3012 */
3013 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
3014 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
3015 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
3016 }
3017 else
3018 {
3019# endif
3020 /** @todo See what happens if we remove this. */
3021 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
3022 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
3023 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
3024 pCtxCore->es.Sel &= ~X86_SEL_RPL;
3025 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
3026 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
3027 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
3028 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
3029# ifdef VBOX_WITH_RAW_RING1
3030 }
3031# endif
3032 }
3033 }
3034
3035 return rc;
3036}
3037
3038#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3039
3040/**
3041 * Updates the EFLAGS while we're in raw-mode.
3042 *
3043 * @param pVCpu Pointer to the VMCPU.
3044 * @param fEfl The new EFLAGS value.
3045 */
3046VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
3047{
3048#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3049 if (pVCpu->cpum.s.fRawEntered)
3050 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
3051 else
3052#endif
3053 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
3054}
3055
3056
3057/**
3058 * Gets the EFLAGS while we're in raw-mode.
3059 *
3060 * @returns The eflags.
3061 * @param pVCpu Pointer to the current virtual CPU.
3062 */
3063VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
3064{
3065#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3066 if (pVCpu->cpum.s.fRawEntered)
3067 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
3068#endif
3069 return pVCpu->cpum.s.Guest.eflags.u32;
3070}
3071
3072
3073/**
3074 * Sets the specified changed flags (CPUM_CHANGED_*).
3075 *
 3076 * @param pVCpu Pointer to the current virtual CPU.
 * @param fChangedFlags The changed flags (CPUM_CHANGED_* mask) to set.
3077 */
3078VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
3079{
3080 pVCpu->cpum.s.fChanged |= fChangedFlags;
3081}
3082
3083
3084/**
 3085 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
3086 * @returns true if supported.
3087 * @returns false if not supported.
3088 * @param pVM Pointer to the VM.
3089 */
3090VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
3091{
3092 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
3093}
3094
3095
3096/**
3097 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
3098 * @returns true if used.
3099 * @returns false if not used.
3100 * @param pVM Pointer to the VM.
3101 */
3102VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
3103{
3104 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
3105}
3106
3107
3108/**
3109 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
3110 * @returns true if used.
3111 * @returns false if not used.
3112 * @param pVM Pointer to the VM.
3113 */
3114VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
3115{
3116 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
3117}
3118
3119#ifdef IN_RC
3120
3121/**
3122 * Lazily sync in the FPU/XMM state.
3123 *
3124 * @returns VBox status code.
3125 * @param pVCpu Pointer to the VMCPU.
3126 */
3127VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
3128{
3129 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
3130}
3131
3132#endif /* IN_RC */
3133
3134/**
3135 * Checks if we activated the FPU/XMM state of the guest OS.
3136 * @returns true if we did.
3137 * @returns false if not.
3138 * @param pVCpu Pointer to the VMCPU.
3139 */
3140VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
3141{
3142 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
3143}
3144
3145
3146/**
3147 * Deactivate the FPU/XMM state of the guest OS.
3148 * @param pVCpu Pointer to the VMCPU.
3149 *
3150 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
3151 * FPU state management.
3152 */
3153VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
3154{
3155 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
3156 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
3157}
3158
3159
3160/**
3161 * Checks if the guest debug state is active.
3162 *
3163 * @returns boolean
 3164 * @param pVCpu Pointer to the VMCPU.
3165 */
3166VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
3167{
3168 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
3169}
3170
3171
3172/**
3173 * Checks if the guest debug state is to be made active during the world-switch
3174 * (currently only used for the 32->64 switcher case).
3175 *
3176 * @returns boolean
 3177 * @param pVCpu Pointer to the VMCPU.
3178 */
3179VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
3180{
3181 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
3182}
3183
3184
3185/**
3186 * Checks if the hyper debug state is active.
3187 *
3188 * @returns boolean
 3189 * @param pVCpu Pointer to the VMCPU.
3190 */
3191VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
3192{
3193 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
3194}
3195
3196
3197/**
3198 * Checks if the hyper debug state is to be made active during the world-switch
3199 * (currently only used for the 32->64 switcher case).
3200 *
3201 * @returns boolean
 3202 * @param pVCpu Pointer to the VMCPU.
3203 */
3204VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
3205{
3206 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
3207}
3208
3209
3210/**
3211 * Mark the guest's debug state as inactive.
3212 *
 3213 * @param pVCpu Pointer to the VMCPU.
3215 * @todo This API doesn't make sense any more.
3216 */
3217VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
3218{
3219 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
3220}
3221
3222
3223/**
3224 * Get the current privilege level of the guest.
3225 *
3226 * @returns CPL
3227 * @param pVCpu Pointer to the current virtual CPU.
3228 */
3229VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
3230{
3231 /*
3232 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
3233 *
3234 * Note! We used to check CS.DPL here, assuming it was always equal to
 3235 * CPL even if a conforming segment was loaded. But this turned out to
3236 * only apply to older AMD-V. With VT-x we had an ACP2 regression
3237 * during install after a far call to ring 2 with VT-x. Then on newer
3238 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
3239 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
3240 *
3241 * So, forget CS.DPL, always use SS.DPL.
3242 *
3243 * Note! The SS RPL is always equal to the CPL, while the CS RPL
3244 * isn't necessarily equal if the segment is conforming.
3245 * See section 4.11.1 in the AMD manual.
3246 *
3247 * Update: Where the heck does it say CS.RPL can differ from CPL other than
3248 * right after real->prot mode switch and when in V8086 mode? That
 3249 * section says the RPL specified in a direct transfer (call, jmp,
3250 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
 3251 * it would be impossible for an exception handler or the iret
 3252 * instruction to figure out whether SS:ESP are part of the frame
 3253 * or not. A VBox or qemu bug must've led to this misconception.
3254 *
3255 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
3256 * selector into SS with an RPL other than the CPL when CPL != 3 and
 3257 * we're in 64-bit mode. The intel dev box doesn't allow this; it
 3258 * insists on RPL = CPL. Weird.
3259 */
3260 uint32_t uCpl;
3261 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
3262 {
3263 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3264 {
3265 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
3266 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
3267 else
3268 {
3269 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
3270#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3271# ifdef VBOX_WITH_RAW_RING1
3272 if (pVCpu->cpum.s.fRawEntered)
3273 {
3274 if ( uCpl == 2
3275 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
3276 uCpl = 1;
3277 else if (uCpl == 1)
3278 uCpl = 0;
3279 }
3280 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
3281# else
3282 if (uCpl == 1)
3283 uCpl = 0;
3284# endif
3285#endif
3286 }
3287 }
3288 else
3289 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
3290 }
3291 else
3292 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
3293 return uCpl;
3294}
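 /* Example of the raw-mode unwinding above: with the hidden SS parts not
    valid, SS.Sel=0x0011 (RPL=1) and fRawEntered set, the RPL of 1 is folded
    back to a CPL of 0, the level the guest believes it is executing at. */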
3295
3296
3297/**
3298 * Gets the current guest CPU mode.
3299 *
3300 * If paging mode is what you need, check out PGMGetGuestMode().
3301 *
3302 * @returns The CPU mode.
3303 * @param pVCpu Pointer to the VMCPU.
3304 */
3305VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
3306{
3307 CPUMMODE enmMode;
3308 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3309 enmMode = CPUMMODE_REAL;
3310 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3311 enmMode = CPUMMODE_PROTECTED;
3312 else
3313 enmMode = CPUMMODE_LONG;
3314
3315 return enmMode;
3316}
3317
3318
3319/**
 3320 * Figures out whether the CPU is currently executing 16-, 32- or 64-bit code.
3321 *
3322 * @returns 16, 32 or 64.
3323 * @param pVCpu The current virtual CPU.
3324 */
3325VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
3326{
3327 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3328 return 16;
3329
3330 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3331 {
3332 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3333 return 16;
3334 }
3335
3336 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3337 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3338 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3339 return 64;
3340
3341 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3342 return 32;
3343
3344 return 16;
3345}
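 /* Summary of the decision above (checked in the same order as the code):
        CR0.PE = 0                  -> 16-bit (real mode)
        EFLAGS.VM = 1               -> 16-bit (V8086 mode)
        CS.L = 1 and EFER.LMA = 1   -> 64-bit
        CS.D = 1                    -> 32-bit
        otherwise                   -> 16-bit
    CPUMGetGuestDisMode() below maps the same cases to DISCPUMODE values. */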
3346
3347
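/**
 * Figures out the disassembler mode (16, 32 or 64 bit) for the current guest
 * code, using the same logic as CPUMGetGuestCodeBits() above.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu       The current virtual CPU.
 */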
3348VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
3349{
3350 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3351 return DISCPUMODE_16BIT;
3352
3353 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3354 {
3355 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3356 return DISCPUMODE_16BIT;
3357 }
3358
3359 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3360 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3361 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3362 return DISCPUMODE_64BIT;
3363
3364 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3365 return DISCPUMODE_32BIT;
3366
3367 return DISCPUMODE_16BIT;
3368}
3369