VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@72484

Last change on this file: r72484, checked in by vboxsync (2018-06-08):

IEM,NEM: Define minimum CPUMCTX set for IEM and hook it up to NEM for fetching missing bits as needed. bugref:9044

1/* $Id: CPUMAllRegs.cpp 72484 2018-06-08 17:05:40Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
137
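/*
 * Worked example (illustrative; the selector value is made up): in real and
 * V8086 mode the hidden base is simply the selector shifted left by four, and
 * in V8086 mode the limit is forced to 64 KiB, matching the code above:
 *
 *     uint16_t const uSel  = 0xb800;                  // e.g. the CGA text segment
 *     uint64_t const uBase = (uint32_t)uSel << 4;     // = 0x000b8000
 *     uint32_t const uLim  = 0x0000ffff;              // V8086 mode limit
 */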
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 * @param pSReg The selector register to lazily load hidden parts of.
157 */
158VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
159{
160 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
161}
162
163#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
164
165
166/**
167 * Obsolete.
168 *
169 * We don't support nested hypervisor context interrupts or traps. Life is much
170 * simpler when we don't. It's also slightly faster at times.
171 *
172 * @param pVCpu The cross context virtual CPU structure.
173 */
174VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
175{
176 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
177}
178
179
180/**
181 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
182 *
183 * @param pVCpu The cross context virtual CPU structure.
184 */
185VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
186{
187 return &pVCpu->cpum.s.Hyper;
188}
189
190
191VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
192{
193 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
194 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
195}
196
197
198VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
199{
200 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
201 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
202}
203
204
205VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
206{
207 pVCpu->cpum.s.Hyper.cr3 = cr3;
208
209#ifdef IN_RC
210 /* Update the current CR3. */
211 ASMSetCR3(cr3);
212#endif
213}
214
215VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
216{
217 return pVCpu->cpum.s.Hyper.cr3;
218}
219
220
221VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
222{
223 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
224}
225
226
227VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
228{
229 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
230}
231
232
233VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
234{
235 pVCpu->cpum.s.Hyper.es.Sel = SelES;
236}
237
238
239VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
240{
241 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
242}
243
244
245VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
246{
247 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
248}
249
250
251VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
252{
253 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
254}
255
256
257VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
258{
259 pVCpu->cpum.s.Hyper.esp = u32ESP;
260}
261
262
263VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
264{
265 pVCpu->cpum.s.Hyper.edx = u32EDX;
266}
267
268
269VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
270{
271 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
272 return VINF_SUCCESS;
273}
274
275
276VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
277{
278 pVCpu->cpum.s.Hyper.eip = u32EIP;
279}
280
281
282/**
283 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
284 * EFLAGS and EIP prior to resuming guest execution.
285 *
286 * All general registers not given as a parameter will be set to 0. The EFLAGS
287 * register will be set to sane values for C/C++ code execution with interrupts
288 * disabled and IOPL 0.
289 *
290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
291 * @param u32EIP The EIP value.
292 * @param u32ESP The ESP value.
293 * @param u32EAX The EAX value.
294 * @param u32EDX The EDX value.
295 */
296VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
297{
298 pVCpu->cpum.s.Hyper.eip = u32EIP;
299 pVCpu->cpum.s.Hyper.esp = u32ESP;
300 pVCpu->cpum.s.Hyper.eax = u32EAX;
301 pVCpu->cpum.s.Hyper.edx = u32EDX;
302 pVCpu->cpum.s.Hyper.ecx = 0;
303 pVCpu->cpum.s.Hyper.ebx = 0;
304 pVCpu->cpum.s.Hyper.ebp = 0;
305 pVCpu->cpum.s.Hyper.esi = 0;
306 pVCpu->cpum.s.Hyper.edi = 0;
307 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
308}
309
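/*
 * Usage sketch (illustrative; uResumeEip and uStackTop are made-up placeholder
 * values, pVCpu is the calling EMT's virtual CPU): everything not passed in is
 * zeroed and EFLAGS ends up with only the reserved bit 1 set, i.e. IF clear and
 * IOPL 0:
 *
 *     CPUMSetHyperState(pVCpu, uResumeEip, uStackTop, 0, 0);  // eax=0, edx=0
 */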
310
311VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
312{
313 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
314}
315
316
317VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
318{
319 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
320}
321
322
323/** @def MAYBE_LOAD_DRx
324 * Macro for updating DRx values in raw-mode and ring-0 contexts.
325 */
326#ifdef IN_RING0
327# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
328# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
329 do { \
330 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
331 a_fnLoad(a_uValue); \
332 else \
333 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
334 } while (0)
335# else
336# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
337 do { \
338 a_fnLoad(a_uValue); \
339 } while (0)
340# endif
341
342#elif defined(IN_RC)
343# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
344 do { \
345 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
346 { a_fnLoad(a_uValue); } \
347 } while (0)
348
349#else
350# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
351#endif
352
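/*
 * Rough expansion sketch (illustrative) of MAYBE_LOAD_DRx: in ring-0 on a
 * 64-bit host, CPUMSetHyperDR0() below effectively performs
 *
 *     pVCpu->cpum.s.Hyper.dr[0] = uDr0;
 *     ASMSetDR0(uDr0);                 // via MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0)
 *
 * In raw-mode context the hardware register is only written when
 * CPUM_USED_DEBUG_REGS_HYPER is set, and in ring-3 the macro expands to
 * nothing, so only the shadow value in the Hyper context is updated.
 */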
353VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
354{
355 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
356 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
357}
358
359
360VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
361{
362 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
363 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
364}
365
366
367VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
368{
369 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
370 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
371}
372
373
374VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
375{
376 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
377 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
378}
379
380
381VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
382{
383 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
384}
385
386
387VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
388{
389 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
390#ifdef IN_RC
391 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
392#endif
393}
394
395
396VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.cs.Sel;
399}
400
401
402VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.ds.Sel;
405}
406
407
408VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
409{
410 return pVCpu->cpum.s.Hyper.es.Sel;
411}
412
413
414VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
415{
416 return pVCpu->cpum.s.Hyper.fs.Sel;
417}
418
419
420VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
421{
422 return pVCpu->cpum.s.Hyper.gs.Sel;
423}
424
425
426VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
427{
428 return pVCpu->cpum.s.Hyper.ss.Sel;
429}
430
431
432VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
433{
434 return pVCpu->cpum.s.Hyper.eax;
435}
436
437
438VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
439{
440 return pVCpu->cpum.s.Hyper.ebx;
441}
442
443
444VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
445{
446 return pVCpu->cpum.s.Hyper.ecx;
447}
448
449
450VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
451{
452 return pVCpu->cpum.s.Hyper.edx;
453}
454
455
456VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
457{
458 return pVCpu->cpum.s.Hyper.esi;
459}
460
461
462VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
463{
464 return pVCpu->cpum.s.Hyper.edi;
465}
466
467
468VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
469{
470 return pVCpu->cpum.s.Hyper.ebp;
471}
472
473
474VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
475{
476 return pVCpu->cpum.s.Hyper.esp;
477}
478
479
480VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
481{
482 return pVCpu->cpum.s.Hyper.eflags.u32;
483}
484
485
486VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
487{
488 return pVCpu->cpum.s.Hyper.eip;
489}
490
491
492VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
493{
494 return pVCpu->cpum.s.Hyper.rip;
495}
496
497
498VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
499{
500 if (pcbLimit)
501 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
502 return pVCpu->cpum.s.Hyper.idtr.pIdt;
503}
504
505
506VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
507{
508 if (pcbLimit)
509 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
510 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
511}
512
513
514VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
515{
516 return pVCpu->cpum.s.Hyper.ldtr.Sel;
517}
518
519
520VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
521{
522 return pVCpu->cpum.s.Hyper.dr[0];
523}
524
525
526VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
527{
528 return pVCpu->cpum.s.Hyper.dr[1];
529}
530
531
532VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
533{
534 return pVCpu->cpum.s.Hyper.dr[2];
535}
536
537
538VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
539{
540 return pVCpu->cpum.s.Hyper.dr[3];
541}
542
543
544VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
545{
546 return pVCpu->cpum.s.Hyper.dr[6];
547}
548
549
550VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
551{
552 return pVCpu->cpum.s.Hyper.dr[7];
553}
554
555
556/**
557 * Gets the pointer to the internal CPUMCTXCORE structure.
558 * This is only for reading in order to save a few calls.
559 *
560 * @param pVCpu The cross context virtual CPU structure.
561 */
562VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
563{
564 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
565}
566
567
568/**
569 * Queries the pointer to the internal CPUMCTX structure.
570 *
571 * @returns The CPUMCTX pointer.
572 * @param pVCpu The cross context virtual CPU structure.
573 */
574VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
575{
576 return &pVCpu->cpum.s.Guest;
577}
578
579
580/**
581 * Queries the pointer to the internal CPUMCTXMSRS structure.
582 *
583 * This is for NEM only.
584 *
585 * @returns The CPUMCTXMSRS pointer.
586 * @param pVCpu The cross context virtual CPU structure.
587 */
588VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
589{
590 return &pVCpu->cpum.s.GuestMsrs;
591}
592
593
594VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
595{
596#ifdef VBOX_WITH_RAW_MODE_NOT_R0
597 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
598 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
599#endif
600 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
601 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
602 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
603 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
604 return VINF_SUCCESS; /* formality, consider it void. */
605}
606
607
608VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
609{
610#ifdef VBOX_WITH_RAW_MODE_NOT_R0
611 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
612 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
613#endif
614 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
615 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
616 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
617 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
618 return VINF_SUCCESS; /* formality, consider it void. */
619}
620
621
622VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
623{
624#ifdef VBOX_WITH_RAW_MODE_NOT_R0
625 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
626 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
627#endif
628 pVCpu->cpum.s.Guest.tr.Sel = tr;
629 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
630 return VINF_SUCCESS; /* formality, consider it void. */
631}
632
633
634VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
635{
636#ifdef VBOX_WITH_RAW_MODE_NOT_R0
637 if ( ( ldtr != 0
638 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
639 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
640 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
641#endif
642 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
643 /* The caller will set more hidden bits if it has them. */
644 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
645 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
646 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
647 return VINF_SUCCESS; /* formality, consider it void. */
648}
649
650
651/**
652 * Set the guest CR0.
653 *
654 * When called in GC, the hyper CR0 may be updated if that is
655 * required. The caller only has to take special action if AM,
656 * WP, PG or PE changes.
657 *
658 * @returns VINF_SUCCESS (consider it void).
659 * @param pVCpu The cross context virtual CPU structure.
660 * @param cr0 The new CR0 value.
661 */
662VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
663{
664#ifdef IN_RC
665 /*
666 * Check if we need to change hypervisor CR0 because
667 * of math stuff.
668 */
669 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
670 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
671 {
672 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
673 {
674 /*
675 * We haven't loaded the guest FPU state yet, so TS and MT are both set
676 * and EM should be reflecting the guest EM (it always does this).
677 */
678 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
679 {
680 uint32_t HyperCR0 = ASMGetCR0();
681 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
682 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
683 HyperCR0 &= ~X86_CR0_EM;
684 HyperCR0 |= cr0 & X86_CR0_EM;
685 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
686 ASMSetCR0(HyperCR0);
687 }
688# ifdef VBOX_STRICT
689 else
690 {
691 uint32_t HyperCR0 = ASMGetCR0();
692 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
693 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
694 }
695# endif
696 }
697 else
698 {
699 /*
700 * Already loaded the guest FPU state, so we're just mirroring
701 * the guest flags.
702 */
703 uint32_t HyperCR0 = ASMGetCR0();
704 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
705 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
706 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
707 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
708 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
709 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
710 ASMSetCR0(HyperCR0);
711 }
712 }
713#endif /* IN_RC */
714
715 /*
716 * Check for changes causing TLB flushes (for REM).
717 * The caller is responsible for calling PGM when appropriate.
718 */
719 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
720 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
721 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
722 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
723
724 /*
725 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
726 */
727 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
728 PGMCr0WpEnabled(pVCpu);
729
730 /* The ET flag is settable on a 386 and hardwired on 486+. */
731 if ( !(cr0 & X86_CR0_ET)
732 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
733 cr0 |= X86_CR0_ET;
734
735 pVCpu->cpum.s.Guest.cr0 = cr0;
736 return VINF_SUCCESS;
737}
738
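/*
 * Worked example (illustrative, assuming a guest CPU profile newer than the
 * 80386): the ET handling above means a CR0 value written without ET still
 * reads back with ET set.
 *
 *     CPUMSetGuestCR0(pVCpu, X86_CR0_PE);           // ET not requested...
 *     Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_ET);  // ...but hardwired to 1 on 486+
 */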
739
740VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
741{
742 pVCpu->cpum.s.Guest.cr2 = cr2;
743 return VINF_SUCCESS;
744}
745
746
747VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
748{
749 pVCpu->cpum.s.Guest.cr3 = cr3;
750 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
756{
757 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
758
759 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
760 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
761 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
762
763 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
764 pVCpu->cpum.s.Guest.cr4 = cr4;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
770{
771 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
777{
778 pVCpu->cpum.s.Guest.eip = eip;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
784{
785 pVCpu->cpum.s.Guest.eax = eax;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
791{
792 pVCpu->cpum.s.Guest.ebx = ebx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
798{
799 pVCpu->cpum.s.Guest.ecx = ecx;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
805{
806 pVCpu->cpum.s.Guest.edx = edx;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
812{
813 pVCpu->cpum.s.Guest.esp = esp;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
819{
820 pVCpu->cpum.s.Guest.ebp = ebp;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
826{
827 pVCpu->cpum.s.Guest.esi = esi;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
833{
834 pVCpu->cpum.s.Guest.edi = edi;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
840{
841 pVCpu->cpum.s.Guest.ss.Sel = ss;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
847{
848 pVCpu->cpum.s.Guest.cs.Sel = cs;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
854{
855 pVCpu->cpum.s.Guest.ds.Sel = ds;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
861{
862 pVCpu->cpum.s.Guest.es.Sel = es;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
868{
869 pVCpu->cpum.s.Guest.fs.Sel = fs;
870 return VINF_SUCCESS;
871}
872
873
874VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
875{
876 pVCpu->cpum.s.Guest.gs.Sel = gs;
877 return VINF_SUCCESS;
878}
879
880
881VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
882{
883 pVCpu->cpum.s.Guest.msrEFER = val;
884}
885
886
887VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
888{
889 if (pcbLimit)
890 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
891 return pVCpu->cpum.s.Guest.idtr.pIdt;
892}
893
894
895VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
896{
897 if (pHidden)
898 *pHidden = pVCpu->cpum.s.Guest.tr;
899 return pVCpu->cpum.s.Guest.tr.Sel;
900}
901
902
903VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
904{
905 return pVCpu->cpum.s.Guest.cs.Sel;
906}
907
908
909VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
910{
911 return pVCpu->cpum.s.Guest.ds.Sel;
912}
913
914
915VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
916{
917 return pVCpu->cpum.s.Guest.es.Sel;
918}
919
920
921VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
922{
923 return pVCpu->cpum.s.Guest.fs.Sel;
924}
925
926
927VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
928{
929 return pVCpu->cpum.s.Guest.gs.Sel;
930}
931
932
933VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
934{
935 return pVCpu->cpum.s.Guest.ss.Sel;
936}
937
938
939VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
940{
941 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
942 if ( !CPUMIsGuestInLongMode(pVCpu)
943 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
944 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
945 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
946}
947
948
949VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
950{
951 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
952 if ( !CPUMIsGuestInLongMode(pVCpu)
953 || !pVCpu->cpum.s.Guest.ss.Attr.n.u1Long)
954 return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
955 return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
956}
957
958
959VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
960{
961 return pVCpu->cpum.s.Guest.ldtr.Sel;
962}
963
964
965VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
966{
967 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
968 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
969 return pVCpu->cpum.s.Guest.ldtr.Sel;
970}
971
972
973VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
974{
975 return pVCpu->cpum.s.Guest.cr0;
976}
977
978
979VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
980{
981 return pVCpu->cpum.s.Guest.cr2;
982}
983
984
985VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
986{
987 return pVCpu->cpum.s.Guest.cr3;
988}
989
990
991VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
992{
993 return pVCpu->cpum.s.Guest.cr4;
994}
995
996
997VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
998{
999 uint64_t u64;
1000 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1001 if (RT_FAILURE(rc))
1002 u64 = 0;
1003 return u64;
1004}
1005
1006
1007VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1008{
1009 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1010}
1011
1012
1013VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1014{
1015 return pVCpu->cpum.s.Guest.eip;
1016}
1017
1018
1019VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1020{
1021 return pVCpu->cpum.s.Guest.rip;
1022}
1023
1024
1025VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1026{
1027 return pVCpu->cpum.s.Guest.eax;
1028}
1029
1030
1031VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1032{
1033 return pVCpu->cpum.s.Guest.ebx;
1034}
1035
1036
1037VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1038{
1039 return pVCpu->cpum.s.Guest.ecx;
1040}
1041
1042
1043VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1044{
1045 return pVCpu->cpum.s.Guest.edx;
1046}
1047
1048
1049VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1050{
1051 return pVCpu->cpum.s.Guest.esi;
1052}
1053
1054
1055VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1056{
1057 return pVCpu->cpum.s.Guest.edi;
1058}
1059
1060
1061VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1062{
1063 return pVCpu->cpum.s.Guest.esp;
1064}
1065
1066
1067VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1068{
1069 return pVCpu->cpum.s.Guest.ebp;
1070}
1071
1072
1073VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1074{
1075 return pVCpu->cpum.s.Guest.eflags.u32;
1076}
1077
1078
1079VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1080{
1081 switch (iReg)
1082 {
1083 case DISCREG_CR0:
1084 *pValue = pVCpu->cpum.s.Guest.cr0;
1085 break;
1086
1087 case DISCREG_CR2:
1088 *pValue = pVCpu->cpum.s.Guest.cr2;
1089 break;
1090
1091 case DISCREG_CR3:
1092 *pValue = pVCpu->cpum.s.Guest.cr3;
1093 break;
1094
1095 case DISCREG_CR4:
1096 *pValue = pVCpu->cpum.s.Guest.cr4;
1097 break;
1098
1099 case DISCREG_CR8:
1100 {
1101 uint8_t u8Tpr;
1102 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1103 if (RT_FAILURE(rc))
1104 {
1105 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1106 *pValue = 0;
1107 return rc;
1108 }
1109 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes in CR8; bits 3-0 (the sub-priority) are not reflected in CR8. */
1110 break;
1111 }
1112
1113 default:
1114 return VERR_INVALID_PARAMETER;
1115 }
1116 return VINF_SUCCESS;
1117}
1118
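/*
 * Usage sketch (illustrative): unlike the other control registers, reading CR8
 * goes through the APIC TPR and can fail, so the status code should be checked;
 * CPUMGetGuestCR8() above simply falls back to zero:
 *
 *     uint64_t uCr8 = 0;
 *     int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8);
 *     if (RT_FAILURE(rc))
 *         uCr8 = 0;    // no APIC instance - treat the priority as zero
 */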
1119
1120VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1121{
1122 return pVCpu->cpum.s.Guest.dr[0];
1123}
1124
1125
1126VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1127{
1128 return pVCpu->cpum.s.Guest.dr[1];
1129}
1130
1131
1132VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1133{
1134 return pVCpu->cpum.s.Guest.dr[2];
1135}
1136
1137
1138VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1139{
1140 return pVCpu->cpum.s.Guest.dr[3];
1141}
1142
1143
1144VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1145{
1146 return pVCpu->cpum.s.Guest.dr[6];
1147}
1148
1149
1150VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1151{
1152 return pVCpu->cpum.s.Guest.dr[7];
1153}
1154
1155
1156VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1157{
1158 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1159 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1160 if (iReg == 4 || iReg == 5)
1161 iReg += 2;
1162 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1163 return VINF_SUCCESS;
1164}
1165
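/*
 * Usage sketch (illustrative) of the DR4/DR5 aliasing handled above:
 *
 *     uint64_t uVal4, uVal6;
 *     CPUMGetGuestDRx(pVCpu, 4, &uVal4);   // DR4 aliases DR6
 *     CPUMGetGuestDRx(pVCpu, 6, &uVal6);
 *     Assert(uVal4 == uVal6);
 */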
1166
1167VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1168{
1169 return pVCpu->cpum.s.Guest.msrEFER;
1170}
1171
1172
1173/**
1174 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1175 *
1176 * @returns Pointer to the leaf if found, NULL if not.
1177 *
1178 * @param pVM The cross context VM structure.
1179 * @param uLeaf The leaf to get.
1180 */
1181PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1182{
1183 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1184 if (iEnd)
1185 {
1186 unsigned iStart = 0;
1187 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1188 for (;;)
1189 {
1190 unsigned i = iStart + (iEnd - iStart) / 2U;
1191 if (uLeaf < paLeaves[i].uLeaf)
1192 {
1193 if (i <= iStart)
1194 return NULL;
1195 iEnd = i;
1196 }
1197 else if (uLeaf > paLeaves[i].uLeaf)
1198 {
1199 i += 1;
1200 if (i >= iEnd)
1201 return NULL;
1202 iStart = i;
1203 }
1204 else
1205 {
1206 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1207 return &paLeaves[i];
1208
1209 /* This shouldn't normally happen. But in case it does due
1210 to user configuration overrides or something, just return the
1211 first sub-leaf. */
1212 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1213 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1214 while ( paLeaves[i].uSubLeaf != 0
1215 && i > 0
1216 && uLeaf == paLeaves[i - 1].uLeaf)
1217 i--;
1218 return &paLeaves[i];
1219 }
1220 }
1221 }
1222
1223 return NULL;
1224}
1225
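/*
 * The lookup above is a binary search over paCpuIdLeaves, which is sorted by
 * uLeaf (and by uSubLeaf within a leaf). A minimal sketch of the same idea on a
 * plain sorted array (findLeafIndex is a hypothetical helper, shown only to
 * illustrate the search):
 *
 *     static int32_t findLeafIndex(uint32_t const *pauLeaves, unsigned cLeaves, uint32_t uLeaf)
 *     {
 *         unsigned iStart = 0, iEnd = cLeaves;
 *         while (iStart < iEnd)
 *         {
 *             unsigned i = iStart + (iEnd - iStart) / 2;
 *             if      (uLeaf < pauLeaves[i]) iEnd   = i;
 *             else if (uLeaf > pauLeaves[i]) iStart = i + 1;
 *             else                           return (int32_t)i;
 *         }
 *         return -1; // not found
 *     }
 */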
1226
1227/**
1228 * Looks up a CPUID leaf in the CPUID leaf array.
1229 *
1230 * @returns Pointer to the leaf if found, NULL if not.
1231 *
1232 * @param pVM The cross context VM structure.
1233 * @param uLeaf The leaf to get.
1234 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1235 * isn't.
1236 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1237 */
1238PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1239{
1240 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1241 if (iEnd)
1242 {
1243 unsigned iStart = 0;
1244 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1245 for (;;)
1246 {
1247 unsigned i = iStart + (iEnd - iStart) / 2U;
1248 if (uLeaf < paLeaves[i].uLeaf)
1249 {
1250 if (i <= iStart)
1251 return NULL;
1252 iEnd = i;
1253 }
1254 else if (uLeaf > paLeaves[i].uLeaf)
1255 {
1256 i += 1;
1257 if (i >= iEnd)
1258 return NULL;
1259 iStart = i;
1260 }
1261 else
1262 {
1263 uSubLeaf &= paLeaves[i].fSubLeafMask;
1264 if (uSubLeaf == paLeaves[i].uSubLeaf)
1265 *pfExactSubLeafHit = true;
1266 else
1267 {
1268 /* Find the right subleaf. We return the last one before
1269 uSubLeaf if we don't find an exact match. */
1270 if (uSubLeaf < paLeaves[i].uSubLeaf)
1271 while ( i > 0
1272 && uLeaf == paLeaves[i - 1].uLeaf
1273 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1274 i--;
1275 else
1276 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1277 && uLeaf == paLeaves[i + 1].uLeaf
1278 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1279 i++;
1280 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1281 }
1282 return &paLeaves[i];
1283 }
1284 }
1285 }
1286
1287 *pfExactSubLeafHit = false;
1288 return NULL;
1289}
1290
1291
1292/**
1293 * Gets a CPUID leaf.
1294 *
1295 * @param pVCpu The cross context virtual CPU structure.
1296 * @param uLeaf The CPUID leaf to get.
1297 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1298 * @param pEax Where to store the EAX value.
1299 * @param pEbx Where to store the EBX value.
1300 * @param pEcx Where to store the ECX value.
1301 * @param pEdx Where to store the EDX value.
1302 */
1303VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1304 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1305{
1306 bool fExactSubLeafHit;
1307 PVM pVM = pVCpu->CTX_SUFF(pVM);
1308 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1309 if (pLeaf)
1310 {
1311 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1312 if (fExactSubLeafHit)
1313 {
1314 *pEax = pLeaf->uEax;
1315 *pEbx = pLeaf->uEbx;
1316 *pEcx = pLeaf->uEcx;
1317 *pEdx = pLeaf->uEdx;
1318
1319 /*
1320 * Deal with CPU specific information.
1321 */
1322 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1323 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1324 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1325 {
1326 if (uLeaf == 1)
1327 {
1328 /* EBX: Bits 31-24: Initial APIC ID. */
1329 Assert(pVCpu->idCpu <= 255);
1330 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1331 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1332
1333 /* EDX: Bit 9: AND with APICBASE.EN. */
1334 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1335 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1336
1337 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1338 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1339 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1340 }
1341 else if (uLeaf == 0xb)
1342 {
1343 /* EDX: Initial extended APIC ID. */
1344 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1345 *pEdx = pVCpu->idCpu;
1346 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1347 }
1348 else if (uLeaf == UINT32_C(0x8000001e))
1349 {
1350 /* EAX: Initial extended APIC ID. */
1351 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1352 *pEax = pVCpu->idCpu;
1353 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1354 }
1355 else if (uLeaf == UINT32_C(0x80000001))
1356 {
1357 /* EDX: Bit 9: AND with APICBASE.EN. */
1358 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1359 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1360 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1361 }
1362 else
1363 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1364 }
1365 }
1366 /*
1367 * Out-of-range sub-leaves aren't quite as easy or pretty to emulate,
1368 * but we do the best we can here...
1369 */
1370 else
1371 {
1372 *pEax = *pEbx = *pEcx = *pEdx = 0;
1373 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1374 {
1375 *pEcx = uSubLeaf & 0xff;
1376 *pEdx = pVCpu->idCpu;
1377 }
1378 }
1379 }
1380 else
1381 {
1382 /*
1383 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1384 */
1385 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1386 {
1387 default:
1388 AssertFailed();
1389 RT_FALL_THRU();
1390 case CPUMUNKNOWNCPUID_DEFAULTS:
1391 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1392 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1393 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1394 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1395 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1396 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1397 break;
1398 case CPUMUNKNOWNCPUID_PASSTHRU:
1399 *pEax = uLeaf;
1400 *pEbx = 0;
1401 *pEcx = uSubLeaf;
1402 *pEdx = 0;
1403 break;
1404 }
1405 }
1406 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1407}
1408
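/*
 * Usage sketch (illustrative, pVCpu being the calling EMT's virtual CPU): for
 * leaves flagged as containing the APIC ID, EBX bits 31-24 are patched to the
 * virtual CPU id, and ECX bit 27 mirrors the guest CR4.OSXSAVE:
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *     uint8_t const idApic   = (uint8_t)(uEbx >> 24);                          // == pVCpu->idCpu
 *     bool const    fOsXSave = RT_BOOL(uEcx & X86_CPUID_FEATURE_ECX_OSXSAVE);  // CR4.OSXSAVE mirror
 */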
1409
1410/**
1411 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1412 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1413 *
1414 * @returns Previous value.
1415 * @param pVCpu The cross context virtual CPU structure to make the
1416 * change on. Usually the calling EMT.
1417 * @param fVisible Whether to make it visible (true) or hide it (false).
1418 *
1419 * @remarks This is "VMMDECL" so that it still links with
1420 * the old APIC code which is in VBoxDD2 and not in
1421 * the VMM module.
1422 */
1423VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1424{
1425 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1426 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1427
1428#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1429 /*
1430 * Patch manager saved state legacy pain.
1431 */
1432 PVM pVM = pVCpu->CTX_SUFF(pVM);
1433 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1434 if (pLeaf)
1435 {
1436 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1437 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1438 else
1439 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1440 }
1441
1442 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1443 if (pLeaf)
1444 {
1445 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1446 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1447 else
1448 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1449 }
1450#endif
1451
1452 return fOld;
1453}
1454
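/*
 * Usage sketch (illustrative): hiding the APIC feature makes the APIC bits of
 * leaves 0x00000001 and 0x80000001 read as zero via CPUMGetGuestCpuId(); the
 * previous visibility is returned so it can be restored:
 *
 *     bool const fOld = CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false);  // hide
 *     ...
 *     CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, fOld);                     // restore
 */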
1455
1456/**
1457 * Gets the host CPU vendor.
1458 *
1459 * @returns CPU vendor.
1460 * @param pVM The cross context VM structure.
1461 */
1462VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1463{
1464 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1465}
1466
1467
1468/**
1469 * Gets the CPU vendor.
1470 *
1471 * @returns CPU vendor.
1472 * @param pVM The cross context VM structure.
1473 */
1474VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1475{
1476 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1477}
1478
1479
1480VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1481{
1482 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1483 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1484}
1485
1486
1487VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1488{
1489 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1490 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1491}
1492
1493
1494VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1495{
1496 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1497 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1498}
1499
1500
1501VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1502{
1503 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1504 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1505}
1506
1507
1508VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1509{
1510 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1511 return VINF_SUCCESS; /* No need to recalc. */
1512}
1513
1514
1515VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1516{
1517 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1518 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1519}
1520
1521
1522VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1523{
1524 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1525 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1526 if (iReg == 4 || iReg == 5)
1527 iReg += 2;
1528 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1529 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1530}
1531
1532
1533/**
1534 * Recalculates the hypervisor DRx register values based on current guest
1535 * registers and DBGF breakpoints, updating changed registers depending on the
1536 * context.
1537 *
1538 * This is called whenever a guest DRx register is modified (any context) and
1539 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1540 *
1541 * In raw-mode context this function will reload any (hyper) DRx registers which
1542 * come out with a different value. It may also have to save the host debug
1543 * registers if that hasn't been done already. In this context though, we'll
1544 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1545 * are only important when breakpoints are actually enabled.
1546 *
1547 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1548 * reloaded by the HM code if it changes. Furthermore, we will only use the
1549 * combined register set when the VBox debugger is actually using hardware BPs,
1550 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1551 * concern us here).
1552 *
1553 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1554 * all the time.
1555 *
1556 * @returns VINF_SUCCESS.
1557 * @param pVCpu The cross context virtual CPU structure.
1558 * @param iGstReg The guest debug register number that was modified.
1559 * UINT8_MAX if not guest register.
1560 * @param fForceHyper Used in HM to force hyper registers because of single
1561 * stepping.
1562 */
1563VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1564{
1565 PVM pVM = pVCpu->CTX_SUFF(pVM);
1566#ifndef IN_RING0
1567 RT_NOREF_PV(iGstReg);
1568#endif
1569
1570 /*
1571 * Compare the DR7s first.
1572 *
1573 * We only care about the enabled flags. GD is virtualized when we
1574 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1575 * always have the LE and GE bits set, so no need to check and disable
1576 * stuff if they're cleared like we have to for the guest DR7.
1577 */
1578 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1579 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1580 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1581 uGstDr7 = 0;
1582 else if (!(uGstDr7 & X86_DR7_LE))
1583 uGstDr7 &= ~X86_DR7_LE_ALL;
1584 else if (!(uGstDr7 & X86_DR7_GE))
1585 uGstDr7 &= ~X86_DR7_GE_ALL;
1586
1587 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1588
1589#ifdef IN_RING0
1590 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1591 fForceHyper = true;
1592#endif
1593 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1594 & X86_DR7_ENABLED_MASK)
1595 {
1596 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1597#ifdef IN_RC
1598 bool const fRawModeEnabled = true;
1599#elif defined(IN_RING3)
1600 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1601#endif
1602
1603 /*
1604 * Ok, something is enabled. Recalc each of the breakpoints, taking
1605 * the VM debugger ones over the guest ones. In raw-mode context we will
1606 * not allow breakpoints with values inside the hypervisor area.
1607 */
1608 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1609
1610 /* bp 0 */
1611 RTGCUINTREG uNewDr0;
1612 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1613 {
1614 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1615 uNewDr0 = DBGFBpGetDR0(pVM);
1616 }
1617 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1618 {
1619 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1620#ifndef IN_RING0
1621 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1622 uNewDr0 = 0;
1623 else
1624#endif
1625 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1626 }
1627 else
1628 uNewDr0 = 0;
1629
1630 /* bp 1 */
1631 RTGCUINTREG uNewDr1;
1632 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1633 {
1634 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1635 uNewDr1 = DBGFBpGetDR1(pVM);
1636 }
1637 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1638 {
1639 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1640#ifndef IN_RING0
1641 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1642 uNewDr1 = 0;
1643 else
1644#endif
1645 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1646 }
1647 else
1648 uNewDr1 = 0;
1649
1650 /* bp 2 */
1651 RTGCUINTREG uNewDr2;
1652 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1653 {
1654 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1655 uNewDr2 = DBGFBpGetDR2(pVM);
1656 }
1657 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1658 {
1659 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1660#ifndef IN_RING0
1661 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1662 uNewDr2 = 0;
1663 else
1664#endif
1665 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1666 }
1667 else
1668 uNewDr2 = 0;
1669
1670 /* bp 3 */
1671 RTGCUINTREG uNewDr3;
1672 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1673 {
1674 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1675 uNewDr3 = DBGFBpGetDR3(pVM);
1676 }
1677 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1678 {
1679 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1680#ifndef IN_RING0
1681 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1682 uNewDr3 = 0;
1683 else
1684#endif
1685 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1686 }
1687 else
1688 uNewDr3 = 0;
1689
1690 /*
1691 * Apply the updates.
1692 */
1693#ifdef IN_RC
1694 /* Make sure to save host registers first. */
1695 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1696 {
1697 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1698 {
1699 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1700 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1701 }
1702 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1703 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1704 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1705 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1706 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1707
1708 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1709 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1710 ASMSetDR0(uNewDr0);
1711 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1712 ASMSetDR1(uNewDr1);
1713 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1714 ASMSetDR2(uNewDr2);
1715 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1716 ASMSetDR3(uNewDr3);
1717 ASMSetDR6(X86_DR6_INIT_VAL);
1718 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1719 ASMSetDR7(uNewDr7);
1720 }
1721 else
1722#endif
1723 {
1724 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1725 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1726 CPUMSetHyperDR3(pVCpu, uNewDr3);
1727 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1728 CPUMSetHyperDR2(pVCpu, uNewDr2);
1729 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1730 CPUMSetHyperDR1(pVCpu, uNewDr1);
1731 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1732 CPUMSetHyperDR0(pVCpu, uNewDr0);
1733 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1734 CPUMSetHyperDR7(pVCpu, uNewDr7);
1735 }
1736 }
1737#ifdef IN_RING0
1738 else if (CPUMIsGuestDebugStateActive(pVCpu))
1739 {
1740 /*
1741 * Reload the register that was modified. Normally this won't happen
1742 * as we won't intercept DRx writes when not having the hyper debug
1743 * state loaded, but in case we do for some reason we'll simply deal
1744 * with it.
1745 */
1746 switch (iGstReg)
1747 {
1748 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1749 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1750 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1751 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1752 default:
1753 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1754 }
1755 }
1756#endif
1757 else
1758 {
1759 /*
1760 * No active debug state any more. In raw-mode this means we have to
1761 * make sure DR7 has everything disabled now, if we armed it already.
1762 * In ring-0 we might end up here when just single stepping.
1763 */
1764#if defined(IN_RC) || defined(IN_RING0)
1765 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1766 {
1767# ifdef IN_RC
1768 ASMSetDR7(X86_DR7_INIT_VAL);
1769# endif
1770 if (pVCpu->cpum.s.Hyper.dr[0])
1771 ASMSetDR0(0);
1772 if (pVCpu->cpum.s.Hyper.dr[1])
1773 ASMSetDR1(0);
1774 if (pVCpu->cpum.s.Hyper.dr[2])
1775 ASMSetDR2(0);
1776 if (pVCpu->cpum.s.Hyper.dr[3])
1777 ASMSetDR3(0);
1778 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1779 }
1780#endif
1781 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1782
1783 /* Clear all the registers. */
1784 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1785 pVCpu->cpum.s.Hyper.dr[3] = 0;
1786 pVCpu->cpum.s.Hyper.dr[2] = 0;
1787 pVCpu->cpum.s.Hyper.dr[1] = 0;
1788 pVCpu->cpum.s.Hyper.dr[0] = 0;
1789
1790 }
1791 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1792 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1793 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1794 pVCpu->cpum.s.Hyper.dr[7]));
1795
1796 return VINF_SUCCESS;
1797}
1798
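/*
 * Worked example (illustrative, ignoring the raw-mode hypervisor-area check):
 * if DBGF owns breakpoint 0 and the guest owns breakpoint 1, the recalculation
 * above produces
 *
 *     uNewDr0 = DBGFBpGetDR0(pVM);
 *     uNewDr1 = CPUMGetGuestDR1(pVCpu);
 *     uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK
 *             | (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK))
 *             | (uGstDr7  & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK));
 */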
1799
1800/**
1801 * Set the guest XCR0 register.
1802 *
1803 * Will load additional state if the FPU state is already loaded (in ring-0 &
1804 * raw-mode context).
1805 *
1806 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1807 * value.
1808 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1809 * @param uNewValue The new value.
1810 * @thread EMT(pVCpu)
1811 */
1812VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1813{
1814 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1815 /* The X87 bit cannot be cleared. */
1816 && (uNewValue & XSAVE_C_X87)
1817 /* AVX requires SSE. */
1818 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1819 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1820 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1821 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1822 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1823 )
1824 {
1825 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1826
1827 /* If more state components are enabled, we need to take care to load
1828 them if the FPU/SSE state is already loaded. May otherwise leak
1829 host state to the guest. */
1830 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1831 if (fNewComponents)
1832 {
1833#if defined(IN_RING0) || defined(IN_RC)
1834 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1835 {
1836 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1837 /* Adding more components. */
1838 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1839 else
1840 {
1841 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1842 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1843 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1844 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1845 }
1846 }
1847#endif
1848 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1849 }
1850 return VINF_SUCCESS;
1851 }
1852 return VERR_CPUM_RAISE_GP_0;
1853}
1854
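/*
 * Worked example (illustrative, assuming the guest XSAVE mask permits AVX) of
 * the validation rules above:
 *
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);  // VINF_SUCCESS
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE | XSAVE_C_YMM);                // VERR_CPUM_RAISE_GP_0 - X87 cleared
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);                // VERR_CPUM_RAISE_GP_0 - YMM without SSE
 */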
1855
1856/**
1857 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1858 *
1859 * @returns true if NX (no-execute) is enabled, otherwise false.
1860 * @param pVCpu The cross context virtual CPU structure.
1861 */
1862VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1863{
1864 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1865}
1866
1867
1868/**
1869 * Tests if the guest has the Page Size Extension enabled (PSE).
1870 *
1871 * @returns true if page size extensions are available (PSE or PAE set), otherwise false.
1872 * @param pVCpu The cross context virtual CPU structure.
1873 */
1874VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1875{
1876 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1877 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1878}
1879
1880
1881/**
1882 * Tests if the guest has paging enabled (PG).
1883 *
1884 * @returns true if paging is enabled, otherwise false.
1885 * @param pVCpu The cross context virtual CPU structure.
1886 */
1887VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1888{
1889 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1890}
1891
1892
1893/**
1894 * Tests if the guest has ring-0 write protection enabled (WP).
1895 *
1896 * @returns true if CR0.WP is set, otherwise false.
1897 * @param pVCpu The cross context virtual CPU structure.
1898 */
1899VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1900{
1901 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1902}
1903
1904
1905/**
1906 * Tests if the guest is running in real mode or not.
1907 *
1908 * @returns true if in real mode, otherwise false.
1909 * @param pVCpu The cross context virtual CPU structure.
1910 */
1911VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1912{
1913 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1914}
1915
1916
1917/**
1918 * Tests if the guest is running in real or virtual 8086 mode.
1919 *
1920 * @returns @c true if it is, @c false if not.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 */
1923VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1924{
1925 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1926 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1927}
1928
1929
1930/**
1931 * Tests if the guest is running in protected mode or not.
1932 *
1933 * @returns true if in protected mode, otherwise false.
1934 * @param pVCpu The cross context virtual CPU structure.
1935 */
1936VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1937{
1938 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1939}
1940
1941
1942/**
1943 * Tests if the guest is running in paged protected mode or not.
1944 *
1945 * @returns true if in paged protected mode, otherwise false.
1946 * @param pVCpu The cross context virtual CPU structure.
1947 */
1948VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1949{
1950 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1951}
1952
1953
1954/**
1955 * Tests if the guest is running in long mode or not.
1956 *
1957 * @returns true if in long mode, otherwise false.
1958 * @param pVCpu The cross context virtual CPU structure.
1959 */
1960VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1961{
1962 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1963}
1964
1965
1966/**
1967 * Tests if the guest is running in PAE mode or not.
1968 *
1969 * @returns true if in PAE mode, otherwise false.
1970 * @param pVCpu The cross context virtual CPU structure.
1971 */
1972VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1973{
1974 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1975 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1976 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1977 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1978 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1979}
1980
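/*
 * Summary sketch (illustrative) of the mode predicates in this area, matching
 * the register checks they perform:
 *     real mode:            CR0.PE == 0
 *     protected mode:       CR0.PE == 1
 *     paged protected mode: CR0.PE == 1 && CR0.PG == 1
 *     PAE mode:             CR4.PAE == 1 && CR0.PG == 1 && EFER.LMA == 0
 *     long mode:            EFER.LMA == 1
 */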
1981
1982/**
1983 * Tests if the guest is running in 64 bits mode or not.
1984 *
1985 * @returns true if in 64 bits protected mode, otherwise false.
1986 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1987 */
1988VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1989{
1990 if (!CPUMIsGuestInLongMode(pVCpu))
1991 return false;
1992 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1993 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1994}
1995
1996
1997/**
1998 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of the hidden
1999 * CS register parts.
2000 *
2001 * @returns true if in 64-bit protected mode, otherwise false.
2002 * @param pCtx Pointer to the current guest CPU context.
2003 */
2004VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2005{
2006 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2007}
2008
2009#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2010
2011/**
2012 * Checks whether we've entered raw-mode (via CPUMRawEnter).
2013 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2014 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2015 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2016 */
2017VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2018{
2019 return pVCpu->cpum.s.fRawEntered;
2020}
2021
2022/**
2023 * Transforms the guest CPU state to raw-ring mode.
2024 *
2025 * This function will change any of the cs and ss selectors with RPL=0 to RPL=1.
2026 *
2027 * @returns VBox status code. (recompiler failure)
2028 * @param pVCpu The cross context virtual CPU structure.
2029 * @see @ref pg_raw
2030 */
2031VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2032{
2033 PVM pVM = pVCpu->CTX_SUFF(pVM);
2034
2035 Assert(!pVCpu->cpum.s.fRawEntered);
2036 Assert(!pVCpu->cpum.s.fRemEntered);
2037 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2038
2039 /*
2040 * Are we in Ring-0?
2041 */
2042 if ( pCtx->ss.Sel
2043 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2044 && !pCtx->eflags.Bits.u1VM)
2045 {
2046 /*
2047 * Enter execution mode.
2048 */
2049 PATMRawEnter(pVM, pCtx);
2050
2051 /*
2052 * Set CPL to Ring-1.
2053 */
2054 pCtx->ss.Sel |= 1;
2055 if ( pCtx->cs.Sel
2056 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2057 pCtx->cs.Sel |= 1;
2058 }
2059 else
2060 {
2061# ifdef VBOX_WITH_RAW_RING1
2062 if ( EMIsRawRing1Enabled(pVM)
2063 && !pCtx->eflags.Bits.u1VM
2064 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2065 {
2066 /* Set CPL to Ring-2. */
2067 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2068 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2069 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2070 }
2071# else
2072 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2073 ("ring-1 code not supported\n"));
2074# endif
2075 /*
2076 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2077 */
2078 PATMRawEnter(pVM, pCtx);
2079 }
2080
2081 /*
2082 * Assert sanity.
2083 */
2084 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2085 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2086 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2087 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2088
2089 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2090
2091 pVCpu->cpum.s.fRawEntered = true;
2092 return VINF_SUCCESS;
2093}
2094
2095
2096/**
2097 * Transforms the guest CPU state from raw-ring mode to correct values.
2098 *
2099 * This function will change any selector registers with RPL=1 back to RPL=0.
2100 *
2101 * @returns Adjusted rc.
2102 * @param pVCpu The cross context virtual CPU structure.
2103 * @param rc Raw mode return code
2104 * @see @ref pg_raw
2105 */
2106VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2107{
2108 PVM pVM = pVCpu->CTX_SUFF(pVM);
2109
2110 /*
2111 * Don't leave if we've already left (in RC).
2112 */
2113 Assert(!pVCpu->cpum.s.fRemEntered);
2114 if (!pVCpu->cpum.s.fRawEntered)
2115 return rc;
2116 pVCpu->cpum.s.fRawEntered = false;
2117
2118 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2119 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2120 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2121 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2122
2123 /*
2124 * Are we executing in raw ring-1?
2125 */
2126 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2127 && !pCtx->eflags.Bits.u1VM)
2128 {
2129 /*
2130 * Leave execution mode.
2131 */
2132 PATMRawLeave(pVM, pCtx, rc);
2133 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2134 /** @todo See what happens if we remove this. */
2135 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2136 pCtx->ds.Sel &= ~X86_SEL_RPL;
2137 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2138 pCtx->es.Sel &= ~X86_SEL_RPL;
2139 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2140 pCtx->fs.Sel &= ~X86_SEL_RPL;
2141 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2142 pCtx->gs.Sel &= ~X86_SEL_RPL;
2143
2144 /*
2145 * Ring-1 selector => Ring-0.
2146 */
2147 pCtx->ss.Sel &= ~X86_SEL_RPL;
2148 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2149 pCtx->cs.Sel &= ~X86_SEL_RPL;
2150 }
2151 else
2152 {
2153 /*
2154 * PATM is taking care of the IOPL and IF flags for us.
2155 */
2156 PATMRawLeave(pVM, pCtx, rc);
2157 if (!pCtx->eflags.Bits.u1VM)
2158 {
2159# ifdef VBOX_WITH_RAW_RING1
2160 if ( EMIsRawRing1Enabled(pVM)
2161 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2162 {
2163 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2164 /** @todo See what happens if we remove this. */
2165 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2166 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2167 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2168 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2169 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2170 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2171 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2172 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2173
2174 /*
2175 * Ring-2 selector => Ring-1.
2176 */
2177 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2178 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2179 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2180 }
2181 else
2182 {
2183# endif
2184 /** @todo See what happens if we remove this. */
2185 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2186 pCtx->ds.Sel &= ~X86_SEL_RPL;
2187 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2188 pCtx->es.Sel &= ~X86_SEL_RPL;
2189 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2190 pCtx->fs.Sel &= ~X86_SEL_RPL;
2191 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2192 pCtx->gs.Sel &= ~X86_SEL_RPL;
2193# ifdef VBOX_WITH_RAW_RING1
2194 }
2195# endif
2196 }
2197 }
2198
2199 return rc;
2200}
2201
2202#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2203
2204/**
2205 * Updates the EFLAGS while we're in raw-mode.
2206 *
2207 * @param pVCpu The cross context virtual CPU structure.
2208 * @param fEfl The new EFLAGS value.
2209 */
2210VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2211{
2212#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2213 if (pVCpu->cpum.s.fRawEntered)
2214 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2215 else
2216#endif
2217 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2218}
2219
2220
2221/**
2222 * Gets the EFLAGS while we're in raw-mode.
2223 *
2224 * @returns The eflags.
2225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2226 */
2227VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2228{
2229#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2230 if (pVCpu->cpum.s.fRawEntered)
2231 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2232#endif
2233 return pVCpu->cpum.s.Guest.eflags.u32;
2234}
2235
2236
2237/**
2238 * Sets the specified changed flags (CPUM_CHANGED_*).
2239 *
2240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2241 * @param fChangedAdd The changed flags to add.
2242 */
2243VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2244{
2245 pVCpu->cpum.s.fChanged |= fChangedAdd;
2246}
2247
2248
2249/**
2250 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
2251 *
2252 * @returns true if supported.
2253 * @returns false if not supported.
2254 * @param pVM The cross context VM structure.
2255 */
2256VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2257{
2258 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2259}
2260
2261
2262/**
2263 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2264 * @returns true if used.
2265 * @returns false if not used.
2266 * @param pVM The cross context VM structure.
2267 */
2268VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2269{
2270 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2271}
2272
2273
2274/**
2275 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2276 * @returns true if used.
2277 * @returns false if not used.
2278 * @param pVM The cross context VM structure.
2279 */
2280VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2281{
2282 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2283}
2284
2285#ifdef IN_RC
2286
2287/**
2288 * Lazily sync in the FPU/XMM state.
2289 *
2290 * @returns VBox status code.
2291 * @param pVCpu The cross context virtual CPU structure.
2292 */
2293VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2294{
2295 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2296}
2297
2298#endif /* IN_RC */
2299
2300/**
2301 * Checks if we activated the FPU/XMM state of the guest OS.
2302 *
2303 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2304 * time we'll be executing guest code, so it may return true for 64-on-32 when
2305 * we still haven't actually loaded the FPU state, just scheduled it to be
2306 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2307 *
2308 * @returns true / false.
2309 * @param pVCpu The cross context virtual CPU structure.
2310 */
2311VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2312{
2313 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2314}
2315
2316
2317/**
2318 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2319 *
2320 * @returns true / false.
2321 * @param pVCpu The cross context virtual CPU structure.
2322 */
2323VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2324{
2325 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2326}
2327
2328
2329/**
2330 * Checks if we saved the FPU/XMM state of the host OS.
2331 *
2332 * @returns true / false.
2333 * @param pVCpu The cross context virtual CPU structure.
2334 */
2335VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2336{
2337 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2338}
2339
2340
2341/**
2342 * Checks if the guest debug state is active.
2343 *
2344 * @returns boolean
2345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2346 */
2347VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2348{
2349 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2350}
2351
2352
2353/**
2354 * Checks if the guest debug state is to be made active during the world-switch
2355 * (currently only used for the 32->64 switcher case).
2356 *
2357 * @returns boolean
2358 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2359 */
2360VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2361{
2362 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2363}
2364
2365
2366/**
2367 * Checks if the hyper debug state is active.
2368 *
2369 * @returns boolean
2370 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2371 */
2372VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2373{
2374 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2375}
2376
2377
2378/**
2379 * Checks if the hyper debug state is to be made active during the world-switch
2380 * (currently only used for the 32->64 switcher case).
2381 *
2382 * @returns boolean
2383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2384 */
2385VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2386{
2387 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2388}
2389
2390
2391/**
2392 * Mark the guest's debug state as inactive.
2393 *
2395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2396 * @todo This API doesn't make sense any more.
2397 */
2398VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2399{
2400 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2401 NOREF(pVCpu);
2402}
2403
2404
2405/**
2406 * Get the current privilege level of the guest.
2407 *
2408 * @returns CPL
2409 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2410 */
2411VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2412{
2413 /*
2414 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2415 *
2416 * Note! We used to check CS.DPL here, assuming it was always equal to
2417 * CPL even if a conforming segment was loaded. But this turned out to
2418 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2419 * during install after a far call to ring 2. Then on newer
2420 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2421 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2422 *
2423 * So, forget CS.DPL, always use SS.DPL.
2424 *
2425 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2426 * isn't necessarily equal if the segment is conforming.
2427 * See section 4.11.1 in the AMD manual.
2428 *
2429 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2430 * right after real->prot mode switch and when in V8086 mode? That
2431 * section says the RPL specified in a direct transfer (call, jmp,
2432 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2433 * it would be impossible for an exception handler or the iret
2434 * instruction to figure out whether SS:ESP are part of the frame
2435 * or not. A VBox or qemu bug must've led to this misconception.
2436 *
2437 * Update2: On an AMD Bulldozer system here, I've no trouble loading a null
2438 * selector into SS with an RPL other than the CPL when CPL != 3 and
2439 * we're in 64-bit mode. The Intel dev box doesn't allow this; it
2440 * insists on RPL = CPL. Weird.
2441 */
2442 uint32_t uCpl;
2443 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2444 {
2445 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2446 {
2447 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2448 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2449 else
2450 {
2451 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2452#ifdef VBOX_WITH_RAW_MODE_NOT_R0
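                /* In raw-mode the guest's ring-0 code runs with RPL=1 (and, with raw
                   ring-1 enabled, its ring-1 code with RPL=2), so translate the RPL back
                   to the ring the guest believes it is executing in. */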
2453# ifdef VBOX_WITH_RAW_RING1
2454 if (pVCpu->cpum.s.fRawEntered)
2455 {
2456 if ( uCpl == 2
2457 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2458 uCpl = 1;
2459 else if (uCpl == 1)
2460 uCpl = 0;
2461 }
2462 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2463# else
2464 if (uCpl == 1)
2465 uCpl = 0;
2466# endif
2467#endif
2468 }
2469 }
2470 else
2471 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2472 }
2473 else
2474 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2475 return uCpl;
2476}
2477
2478
2479/**
2480 * Gets the current guest CPU mode.
2481 *
2482 * If paging mode is what you need, check out PGMGetGuestMode().
2483 *
2484 * @returns The CPU mode.
2485 * @param pVCpu The cross context virtual CPU structure.
2486 */
2487VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2488{
2489 CPUMMODE enmMode;
2490 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2491 enmMode = CPUMMODE_REAL;
2492 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2493 enmMode = CPUMMODE_PROTECTED;
2494 else
2495 enmMode = CPUMMODE_LONG;
2496
2497 return enmMode;
2498}
2499
2500
2501/**
2502 * Figures out whether the guest CPU is currently executing 16, 32 or 64-bit code.
2503 *
2504 * @returns 16, 32 or 64.
2505 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2506 */
2507VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2508{
2509 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2510 return 16;
2511
2512 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2513 {
2514 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2515 return 16;
2516 }
2517
2518 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2519 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2520 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2521 return 64;
2522
2523 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2524 return 32;
2525
2526 return 16;
2527}
2528
2529
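/**
 * Figures out the disassembler CPU mode for the current guest code.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */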
2530VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2531{
2532 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2533 return DISCPUMODE_16BIT;
2534
2535 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2536 {
2537 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2538 return DISCPUMODE_16BIT;
2539 }
2540
2541 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2542 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2543 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2544 return DISCPUMODE_64BIT;
2545
2546 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2547 return DISCPUMODE_32BIT;
2548
2549 return DISCPUMODE_16BIT;
2550}
2551
2552
2553/**
2554 * Gets the guest MXCSR_MASK value.
2555 *
2556 * This does not access the x87 state, but rather the value we determined at VM
2557 * initialization.
2558 *
2559 * @returns MXCSR mask.
2560 * @param pVM The cross context VM structure.
2561 */
2562VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2563{
2564 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2565}
2566
2567
2568/**
2569 * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
2570 * interrupts.
2571 *
2572 * @returns true if it's ready, false otherwise.
2574 *
2575 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2576 * @param pCtx The guest-CPU context.
2577 *
2578 * @sa hmR0SvmCanNstGstTakePhysIntr.
2579 */
2580VMM_INT_DECL(bool) CPUMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2581{
2582 /** @todo Optimization: Avoid this function call and use a pointer to the
2583 * relevant eflags instead (set up during VMRUN instruction emulation). */
2584#ifdef IN_RC
2585 RT_NOREF2(pVCpu, pCtx);
2586 AssertReleaseFailedReturn(false);
2587#else
2588 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2589 Assert(pCtx->hwvirt.fGif);
2590
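    /* With V_INTR_MASKING set, physical interrupt delivery is gated by the host
       EFLAGS.IF saved at VMRUN; otherwise it is gated by the nested-guest EFLAGS.IF. */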
2591 X86EFLAGS fEFlags;
2592 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2593 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2594 else
2595 fEFlags.u = pCtx->eflags.u;
2596
2597 return fEFlags.Bits.u1IF;
2598#endif
2599}
2600
2601
2602/**
2603 * Checks whether the SVM nested-guest is in a state to receive virtual (set up
2604 * for injection by the VMRUN instruction) interrupts.
2605 *
2606 * @returns true if it's ready, false otherwise.
2608 *
2609 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2610 * @param pCtx The guest-CPU context.
2611 */
2612VMM_INT_DECL(bool) CPUMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2613{
2614#ifdef IN_RC
2615 RT_NOREF2(pVCpu, pCtx);
2616 AssertReleaseFailedReturn(false);
2617#else
2618 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2619 Assert(pCtx->hwvirt.fGif);
2620
2621 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2622 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
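    /* A virtual interrupt is only deliverable when its priority (V_INTR_PRIO) exceeds
       the virtual TPR (V_TPR), unless V_IGN_TPR says the TPR should be ignored. */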
2623 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2624 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2625 return false;
2626
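    /* With V_INTR_MASKING set, the nested-guest EFLAGS.IF gates virtual interrupts;
       otherwise the host EFLAGS.IF saved at VMRUN does. */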
2627 X86EFLAGS fEFlags;
2628 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2629 fEFlags.u = pCtx->eflags.u;
2630 else
2631 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2632
2633 return fEFlags.Bits.u1IF;
2634#endif
2635}
2636
2637
2638/**
2639 * Gets the pending SVM nested-guest interrupt.
2640 *
2641 * @returns The nested-guest interrupt to inject.
2642 * @param pCtx The guest-CPU context.
2643 */
2644VMM_INT_DECL(uint8_t) CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx)
2645{
2646#ifdef IN_RC
2647 RT_NOREF(pCtx);
2648 AssertReleaseFailedReturn(0);
2649#else
2650 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2651 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2652#endif
2653}
2654
2655
2656/**
2657 * Gets the SVM nested-guest virtual GIF.
2658 *
2659 * @returns The nested-guest virtual GIF.
2660 * @param pCtx The guest-CPU context.
2661 */
2662VMM_INT_DECL(bool) CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx)
2663{
2664#ifdef IN_RC
2665 RT_NOREF(pCtx);
2666 AssertReleaseFailedReturn(false);
2667#else
2668 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2669 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
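    /* Only consult V_GIF when the vGIF feature is enabled in the VMCB; otherwise
       report the virtual GIF as set. */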
2670 if (pVmcbIntCtrl->n.u1VGifEnable)
2671 return pVmcbIntCtrl->n.u1VGif;
2672 return true;
2673#endif
2674}
2675
2676
2677/**
2678 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2679 *
2680 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2681 * @param pCtx The guest-CPU context.
2682 */
2683VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2684{
2685 /*
2686 * Reload the guest's "host state".
2687 */
2688 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2689 pCtx->es = pHostState->es;
2690 pCtx->cs = pHostState->cs;
2691 pCtx->ss = pHostState->ss;
2692 pCtx->ds = pHostState->ds;
2693 pCtx->gdtr = pHostState->gdtr;
2694 pCtx->idtr = pHostState->idtr;
2695 CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2696 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2697 pCtx->cr3 = pHostState->uCr3;
2698 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2699 pCtx->rflags = pHostState->rflags;
2700 pCtx->rflags.Bits.u1VM = 0;
2701 pCtx->rip = pHostState->uRip;
2702 pCtx->rsp = pHostState->uRsp;
2703 pCtx->rax = pHostState->uRax;
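    /* Sanitize DR7: disable all breakpoints and enforce the fixed bit layout
       (set the must-be-one bits, clear the read-as-zero/must-be-zero bits). */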
2704 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2705 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2706 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2707
2708 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2709 * raise \#GP(0) in the guest. */
2710
2711 /** @todo check the loaded host-state for consistency. Figure out what
2712 * exactly this involves? */
2713}
2714
2715
2716/**
2717 * Saves the host-state to the host-state save area as part of a VMRUN.
2718 *
2719 * @param pCtx The guest-CPU context.
2720 * @param cbInstr The length of the VMRUN instruction in bytes.
2721 */
2722VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2723{
2724 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2725 pHostState->es = pCtx->es;
2726 pHostState->cs = pCtx->cs;
2727 pHostState->ss = pCtx->ss;
2728 pHostState->ds = pCtx->ds;
2729 pHostState->gdtr = pCtx->gdtr;
2730 pHostState->idtr = pCtx->idtr;
2731 pHostState->uEferMsr = pCtx->msrEFER;
2732 pHostState->uCr0 = pCtx->cr0;
2733 pHostState->uCr3 = pCtx->cr3;
2734 pHostState->uCr4 = pCtx->cr4;
2735 pHostState->rflags = pCtx->rflags;
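    /* The saved RIP points at the instruction following VMRUN so that execution
       resumes there on #VMEXIT. */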
2736 pHostState->uRip = pCtx->rip + cbInstr;
2737 pHostState->uRsp = pCtx->rsp;
2738 pHostState->uRax = pCtx->rax;
2739}
2740
2741
2742/**
2743 * Applies the TSC offset of a nested-guest if any and returns the new TSC
2744 * value for the guest (or nested-guest).
2745 *
2746 * @returns The guest TSC after applying any nested-guest TSC offset.
2747 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2748 * @param uTicks The guest TSC.
2749 *
2750 * @sa HMSvmNstGstApplyTscOffset.
2751 */
2752VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
2753{
2754#ifndef IN_RC
2755 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2756 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2757 {
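        /* If HM has cached (and possibly modified) the VMCB, query the TSC offset
           through HM; otherwise read it straight from the nested-guest VMCB. */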
2758 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
2759 {
2760 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2761 return uTicks + pVmcb->ctrl.u64TSCOffset;
2762 }
2763 return HMSvmNstGstApplyTscOffset(pVCpu, uTicks);
2764 }
2765
2766 /** @todo Intel. */
2767#else
2768 RT_NOREF(pVCpu);
2769#endif
2770 return uTicks;
2771}
2772