VirtualBox source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @ 72488

Last change on this file since 72488 was 72488, checked in by vboxsync on 2018-06-09:

NEM,CPUM,EM: Don't sync in/out the entire state when leaving the inner NEM loop, only what IEM/TRPM might need. Speeds up MMIO and I/O requiring return to ring-3. bugref:9044

1/* $Id: CPUMAllRegs.cpp 72488 2018-06-09 12:24:35Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#ifndef IN_RC
31# include <VBox/vmm/nem.h>
32# include <VBox/vmm/hm.h>
33#endif
34#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
35# include <VBox/vmm/selm.h>
36#endif
37#include "CPUMInternal.h"
38#include <VBox/vmm/vm.h>
39#include <VBox/err.h>
40#include <VBox/dis.h>
41#include <VBox/log.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/tm.h>
44#include <iprt/assert.h>
45#include <iprt/asm.h>
46#include <iprt/asm-amd64-x86.h>
47#ifdef IN_RING3
48# include <iprt/thread.h>
49#endif
50
51/** Disable stack frame pointer generation here. */
52#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
53# pragma optimize("y", off)
54#endif
55
56AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
57AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/**
64 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
65 *
66 * @returns Pointer to the Virtual CPU.
67 * @param a_pGuestCtx Pointer to the guest context.
68 */
69#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
70
71/**
72 * Lazily loads the hidden parts of a selector register when using raw-mode.
73 */
74#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 do \
77 { \
78 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
79 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
80 } while (0)
81#else
82# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
83 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
84#endif
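/* Note (illustrative, not part of the original source): in raw-mode builds the
   macro above resolves stale hidden selector fields on demand via
   cpumGuestLazyLoadHiddenSelectorReg(); in ring-0 and non-raw-mode builds the
   hidden fields are expected to be valid already, so it only asserts that. */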
85
86
87
88#ifdef VBOX_WITH_RAW_MODE_NOT_R0
89
90/**
91 * Does the lazy hidden selector register loading.
92 *
93 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
94 * @param pSReg The selector register to lazily load hidden parts of.
95 */
96static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
97{
98 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
99 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
100 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
101
102 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
103 {
104 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
105 pSReg->Attr.u = 0;
106 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
107 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
108 pSReg->Attr.n.u2Dpl = 3;
109 pSReg->Attr.n.u1Present = 1;
110 pSReg->u32Limit = 0x0000ffff;
111 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
112 pSReg->ValidSel = pSReg->Sel;
113 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
114 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
115 }
116 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
117 {
118 /* Real mode - leave the limit and flags alone here, at least for now. */
119 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
120 pSReg->ValidSel = pSReg->Sel;
121 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
122 }
123 else
124 {
125 /* Protected mode - get it from the selector descriptor tables. */
126 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
127 {
128 Assert(!CPUMIsGuestInLongMode(pVCpu));
129 pSReg->Sel = 0;
130 pSReg->u64Base = 0;
131 pSReg->u32Limit = 0;
132 pSReg->Attr.u = 0;
133 pSReg->ValidSel = 0;
134 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
135 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
136 }
137 else
138 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
139 }
140}
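/* Illustrative example (not from the original source): in V8086 and real mode
   the hidden base is derived directly from the selector value, e.g. a selector
   of 0x1234 yields u64Base = 0x1234 << 4 = 0x12340, the classic segment*16
   address calculation. */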
141
142
143/**
144 * Makes sure the hidden CS and SS selector registers are valid, loading them if
145 * necessary.
146 *
147 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
148 */
149VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
150{
151 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
152 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
153}
154
155
156/**
157 * Loads the hidden parts of a selector register.
158 *
159 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
160 * @param pSReg The selector register to lazily load hidden parts of.
161 */
162VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
163{
164 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
165}
166
167#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
168
169
170/**
171 * Obsolete.
172 *
173 * We don't support nested hypervisor context interrupts or traps. Life is much
174 * simpler when we don't. It's also slightly faster at times.
175 *
176 * @param pVCpu The cross context virtual CPU structure.
177 */
178VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
179{
180 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
181}
182
183
184/**
185 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
186 *
187 * @param pVCpu The cross context virtual CPU structure.
188 */
189VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
190{
191 return &pVCpu->cpum.s.Hyper;
192}
193
194
195VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
196{
197 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
198 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
199}
200
201
202VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
203{
204 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
205 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
206}
207
208
209VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
210{
211 pVCpu->cpum.s.Hyper.cr3 = cr3;
212
213#ifdef IN_RC
214 /* Update the current CR3. */
215 ASMSetCR3(cr3);
216#endif
217}
218
219VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
220{
221 return pVCpu->cpum.s.Hyper.cr3;
222}
223
224
225VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
226{
227 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
228}
229
230
231VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
232{
233 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
234}
235
236
237VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
238{
239 pVCpu->cpum.s.Hyper.es.Sel = SelES;
240}
241
242
243VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
244{
245 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
246}
247
248
249VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
250{
251 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
252}
253
254
255VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
256{
257 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
258}
259
260
261VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
262{
263 pVCpu->cpum.s.Hyper.esp = u32ESP;
264}
265
266
267VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
268{
269 pVCpu->cpum.s.Hyper.edx = u32EDX;
270}
271
272
273VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
274{
275 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
276 return VINF_SUCCESS;
277}
278
279
280VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
281{
282 pVCpu->cpum.s.Hyper.eip = u32EIP;
283}
284
285
286/**
287 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
288 * EFLAGS and EIP prior to resuming guest execution.
289 *
290 * All general registers not given as parameters will be set to 0. The EFLAGS
291 * register will be set to sane values for C/C++ code execution with interrupts
292 * disabled and IOPL 0.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
295 * @param u32EIP The EIP value.
296 * @param u32ESP The ESP value.
297 * @param u32EAX The EAX value.
298 * @param u32EDX The EDX value.
299 */
300VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
301{
302 pVCpu->cpum.s.Hyper.eip = u32EIP;
303 pVCpu->cpum.s.Hyper.esp = u32ESP;
304 pVCpu->cpum.s.Hyper.eax = u32EAX;
305 pVCpu->cpum.s.Hyper.edx = u32EDX;
306 pVCpu->cpum.s.Hyper.ecx = 0;
307 pVCpu->cpum.s.Hyper.ebx = 0;
308 pVCpu->cpum.s.Hyper.ebp = 0;
309 pVCpu->cpum.s.Hyper.esi = 0;
310 pVCpu->cpum.s.Hyper.edi = 0;
311 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
312}
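/* Usage sketch (assumption; the names are illustrative only): a raw-mode run
   loop could prime the hypervisor context like this before resuming execution:
       CPUMSetHyperState(pVCpu, GCPtrEntry, GCPtrTopOfStack, uOperation, uArg);
   which leaves ECX/EBX/EBP/ESI/EDI zeroed and EFLAGS at X86_EFL_1, i.e.
   interrupts disabled and IOPL 0. */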
313
314
315VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
316{
317 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
318}
319
320
321VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
322{
323 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
324}
325
326
327/** @def MAYBE_LOAD_DRx
328 * Macro for updating DRx values in raw-mode and ring-0 contexts.
329 */
330#ifdef IN_RING0
331# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
332# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
333 do { \
334 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
335 a_fnLoad(a_uValue); \
336 else \
337 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
338 } while (0)
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
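/* Behaviour sketch (restating the #ifdef branches above, for illustration):
   in ring-0 the macro loads the register immediately, except on 32-bit hosts
   running 64-bit guests where it defers via CPUM_SYNC_DEBUG_REGS_HYPER; in
   raw-mode it only loads when the hyper debug registers are already in use
   (CPUM_USED_DEBUG_REGS_HYPER); in ring-3 it is a no-op. */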
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu The cross context virtual CPU structure.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu The cross context virtual CPU structure.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583
584/**
585 * Queries the pointer to the internal CPUMCTXMSRS structure.
586 *
587 * This is for NEM only.
588 *
589 * @returns The CPUMCTX pointer.
590 * @param pVCpu The cross context virtual CPU structure.
591 */
592VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
593{
594 return &pVCpu->cpum.s.GuestMsrs;
595}
596
597
598VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
599{
600#ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
603#endif
604 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
605 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
606 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611
612VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
613{
614#ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
617#endif
618 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
619 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
620 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
621 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
622 return VINF_SUCCESS; /* formality, consider it void. */
623}
624
625
626VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
627{
628#ifdef VBOX_WITH_RAW_MODE_NOT_R0
629 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
630 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
631#endif
632 pVCpu->cpum.s.Guest.tr.Sel = tr;
633 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
634 return VINF_SUCCESS; /* formality, consider it void. */
635}
636
637
638VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
639{
640#ifdef VBOX_WITH_RAW_MODE_NOT_R0
641 if ( ( ldtr != 0
642 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
643 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
644 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
645#endif
646 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
647 /* The caller will set more hidden bits if it has them. */
648 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
649 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
650 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
651 return VINF_SUCCESS; /* formality, consider it void. */
652}
653
654
655/**
656 * Set the guest CR0.
657 *
658 * When called in GC, the hyper CR0 may be updated if that is
659 * required. The caller only has to take special action if AM,
660 * WP, PG or PE changes.
661 *
662 * @returns VINF_SUCCESS (consider it void).
663 * @param pVCpu The cross context virtual CPU structure.
664 * @param cr0 The new CR0 value.
665 */
666VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
667{
668#ifdef IN_RC
669 /*
670 * Check if we need to change hypervisor CR0 because
671 * of math stuff.
672 */
673 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
674 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
675 {
676 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
677 {
678 /*
679 * We haven't loaded the guest FPU state yet, so TS and MT are both set
680 * and EM should be reflecting the guest EM (it always does this).
681 */
682 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
683 {
684 uint32_t HyperCR0 = ASMGetCR0();
685 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
686 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
687 HyperCR0 &= ~X86_CR0_EM;
688 HyperCR0 |= cr0 & X86_CR0_EM;
689 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
690 ASMSetCR0(HyperCR0);
691 }
692# ifdef VBOX_STRICT
693 else
694 {
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
697 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
698 }
699# endif
700 }
701 else
702 {
703 /*
704 * Already loaded the guest FPU state, so we're just mirroring
705 * the guest flags.
706 */
707 uint32_t HyperCR0 = ASMGetCR0();
708 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
709 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
710 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
711 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
712 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
713 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
714 ASMSetCR0(HyperCR0);
715 }
716 }
717#endif /* IN_RC */
718
719 /*
720 * Check for changes causing TLB flushes (for REM).
721 * The caller is responsible for calling PGM when appropriate.
722 */
723 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
724 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
725 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
726 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
727
728 /*
729 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
730 */
731 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
732 PGMCr0WpEnabled(pVCpu);
733
734 /* The ET flag is settable on a 386 and hardwired on 486+. */
735 if ( !(cr0 & X86_CR0_ET)
736 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
737 cr0 |= X86_CR0_ET;
738
739 pVCpu->cpum.s.Guest.cr0 = cr0;
740 return VINF_SUCCESS;
741}
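/* Illustrative example (assumption, not part of the original source): a guest
   write that flips CR0.PG or CR0.WP marks both CPUM_CHANGED_GLOBAL_TLB_FLUSH
   and CPUM_CHANGED_CR0 in fChanged, while toggling only CR0.TS/MP/EM marks
   just CPUM_CHANGED_CR0; notifying PGM remains the caller's job. */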
742
743
744VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
745{
746 pVCpu->cpum.s.Guest.cr2 = cr2;
747 return VINF_SUCCESS;
748}
749
750
751VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
752{
753 pVCpu->cpum.s.Guest.cr3 = cr3;
754 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
755 return VINF_SUCCESS;
756}
757
758
759VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
760{
761 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
762
763 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
764 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
765 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
766
767 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
768 pVCpu->cpum.s.Guest.cr4 = cr4;
769 return VINF_SUCCESS;
770}
771
772
773VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
774{
775 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
776 return VINF_SUCCESS;
777}
778
779
780VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
781{
782 pVCpu->cpum.s.Guest.eip = eip;
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
788{
789 pVCpu->cpum.s.Guest.eax = eax;
790 return VINF_SUCCESS;
791}
792
793
794VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
795{
796 pVCpu->cpum.s.Guest.ebx = ebx;
797 return VINF_SUCCESS;
798}
799
800
801VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
802{
803 pVCpu->cpum.s.Guest.ecx = ecx;
804 return VINF_SUCCESS;
805}
806
807
808VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
809{
810 pVCpu->cpum.s.Guest.edx = edx;
811 return VINF_SUCCESS;
812}
813
814
815VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
816{
817 pVCpu->cpum.s.Guest.esp = esp;
818 return VINF_SUCCESS;
819}
820
821
822VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
823{
824 pVCpu->cpum.s.Guest.ebp = ebp;
825 return VINF_SUCCESS;
826}
827
828
829VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
830{
831 pVCpu->cpum.s.Guest.esi = esi;
832 return VINF_SUCCESS;
833}
834
835
836VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
837{
838 pVCpu->cpum.s.Guest.edi = edi;
839 return VINF_SUCCESS;
840}
841
842
843VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
844{
845 pVCpu->cpum.s.Guest.ss.Sel = ss;
846 return VINF_SUCCESS;
847}
848
849
850VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
851{
852 pVCpu->cpum.s.Guest.cs.Sel = cs;
853 return VINF_SUCCESS;
854}
855
856
857VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
858{
859 pVCpu->cpum.s.Guest.ds.Sel = ds;
860 return VINF_SUCCESS;
861}
862
863
864VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
865{
866 pVCpu->cpum.s.Guest.es.Sel = es;
867 return VINF_SUCCESS;
868}
869
870
871VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
872{
873 pVCpu->cpum.s.Guest.fs.Sel = fs;
874 return VINF_SUCCESS;
875}
876
877
878VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
879{
880 pVCpu->cpum.s.Guest.gs.Sel = gs;
881 return VINF_SUCCESS;
882}
883
884
885VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
886{
887 pVCpu->cpum.s.Guest.msrEFER = val;
888}
889
890
891VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
892{
893 if (pcbLimit)
894 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
895 return pVCpu->cpum.s.Guest.idtr.pIdt;
896}
897
898
899VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
900{
901 if (pHidden)
902 *pHidden = pVCpu->cpum.s.Guest.tr;
903 return pVCpu->cpum.s.Guest.tr.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.cs.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.ds.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.es.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.fs.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
932{
933 return pVCpu->cpum.s.Guest.gs.Sel;
934}
935
936
937VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
938{
939 return pVCpu->cpum.s.Guest.ss.Sel;
940}
941
942
943VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
944{
945 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
946 if ( !CPUMIsGuestInLongMode(pVCpu)
947 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
948 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
949 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
950}
951
952
953VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
954{
955 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
956 if ( !CPUMIsGuestInLongMode(pVCpu)
957 || !pVCpu->cpum.s.Guest.ss.Attr.n.u1Long)
958 return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
959 return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
960}
961
962
963VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
964{
965 return pVCpu->cpum.s.Guest.ldtr.Sel;
966}
967
968
969VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
970{
971 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
972 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
973 return pVCpu->cpum.s.Guest.ldtr.Sel;
974}
975
976
977VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
978{
979 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR0));
980 return pVCpu->cpum.s.Guest.cr0;
981}
982
983
984VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
985{
986 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR2));
987 return pVCpu->cpum.s.Guest.cr2;
988}
989
990
991VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
992{
993 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR3));
994 return pVCpu->cpum.s.Guest.cr3;
995}
996
997
998VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
999{
1000 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR4));
1001 return pVCpu->cpum.s.Guest.cr4;
1002}
1003
1004
1005VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1006{
1007 uint64_t u64;
1008 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1009 if (RT_FAILURE(rc))
1010 u64 = 0;
1011 return u64;
1012}
1013
1014
1015VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1016{
1017 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1018}
1019
1020
1021VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1022{
1023 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_RIP));
1024 return pVCpu->cpum.s.Guest.eip;
1025}
1026
1027
1028VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1029{
1030 Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_RIP));
1031 return pVCpu->cpum.s.Guest.rip;
1032}
1033
1034
1035VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1036{
1037 return pVCpu->cpum.s.Guest.eax;
1038}
1039
1040
1041VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1042{
1043 return pVCpu->cpum.s.Guest.ebx;
1044}
1045
1046
1047VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1048{
1049 return pVCpu->cpum.s.Guest.ecx;
1050}
1051
1052
1053VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1054{
1055 return pVCpu->cpum.s.Guest.edx;
1056}
1057
1058
1059VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1060{
1061 return pVCpu->cpum.s.Guest.esi;
1062}
1063
1064
1065VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1066{
1067 return pVCpu->cpum.s.Guest.edi;
1068}
1069
1070
1071VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1072{
1073 return pVCpu->cpum.s.Guest.esp;
1074}
1075
1076
1077VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1078{
1079 return pVCpu->cpum.s.Guest.ebp;
1080}
1081
1082
1083VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1084{
1085 return pVCpu->cpum.s.Guest.eflags.u32;
1086}
1087
1088
1089VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1090{
1091 switch (iReg)
1092 {
1093 case DISCREG_CR0:
1094 *pValue = pVCpu->cpum.s.Guest.cr0;
1095 break;
1096
1097 case DISCREG_CR2:
1098 *pValue = pVCpu->cpum.s.Guest.cr2;
1099 break;
1100
1101 case DISCREG_CR3:
1102 *pValue = pVCpu->cpum.s.Guest.cr3;
1103 break;
1104
1105 case DISCREG_CR4:
1106 *pValue = pVCpu->cpum.s.Guest.cr4;
1107 break;
1108
1109 case DISCREG_CR8:
1110 {
1111 uint8_t u8Tpr;
1112 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1113 if (RT_FAILURE(rc))
1114 {
1115 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1116 *pValue = 0;
1117 return rc;
1118 }
1119 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR hold the task priority, which maps to bits 3-0 of CR8. */
1120 break;
1121 }
1122
1123 default:
1124 return VERR_INVALID_PARAMETER;
1125 }
1126 return VINF_SUCCESS;
1127}
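/* Illustrative example (assumption): with a virtual APIC TPR of 0x80, the
   DISCREG_CR8 path above returns *pValue = 0x80 >> 4 = 8, since CR8 only
   carries the upper four task-priority bits. */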
1128
1129
1130VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1131{
1132 return pVCpu->cpum.s.Guest.dr[0];
1133}
1134
1135
1136VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1137{
1138 return pVCpu->cpum.s.Guest.dr[1];
1139}
1140
1141
1142VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1143{
1144 return pVCpu->cpum.s.Guest.dr[2];
1145}
1146
1147
1148VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1149{
1150 return pVCpu->cpum.s.Guest.dr[3];
1151}
1152
1153
1154VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1155{
1156 return pVCpu->cpum.s.Guest.dr[6];
1157}
1158
1159
1160VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1161{
1162 return pVCpu->cpum.s.Guest.dr[7];
1163}
1164
1165
1166VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1167{
1168 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1169 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1170 if (iReg == 4 || iReg == 5)
1171 iReg += 2;
1172 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1173 return VINF_SUCCESS;
1174}
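/* Illustrative example (assumption): CPUMGetGuestDRx(pVCpu, 4, &uValue) reads
   dr[6] and CPUMGetGuestDRx(pVCpu, 5, &uValue) reads dr[7], matching the
   architectural aliasing of DR4/DR5 onto DR6/DR7. */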
1175
1176
1177VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1178{
1179 return pVCpu->cpum.s.Guest.msrEFER;
1180}
1181
1182
1183/**
1184 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1185 *
1186 * @returns Pointer to the leaf if found, NULL if not.
1187 *
1188 * @param pVM The cross context VM structure.
1189 * @param uLeaf The leaf to get.
1190 */
1191PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1192{
1193 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1194 if (iEnd)
1195 {
1196 unsigned iStart = 0;
1197 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1198 for (;;)
1199 {
1200 unsigned i = iStart + (iEnd - iStart) / 2U;
1201 if (uLeaf < paLeaves[i].uLeaf)
1202 {
1203 if (i <= iStart)
1204 return NULL;
1205 iEnd = i;
1206 }
1207 else if (uLeaf > paLeaves[i].uLeaf)
1208 {
1209 i += 1;
1210 if (i >= iEnd)
1211 return NULL;
1212 iStart = i;
1213 }
1214 else
1215 {
1216 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1217 return &paLeaves[i];
1218
1219 /* This shouldn't normally happen. But in case it does due
1220 to user configuration overrides or something, just return the
1221 first sub-leaf. */
1222 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1223 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1224 while ( paLeaves[i].uSubLeaf != 0
1225 && i > 0
1226 && uLeaf == paLeaves[i - 1].uLeaf)
1227 i--;
1228 return &paLeaves[i];
1229 }
1230 }
1231 }
1232
1233 return NULL;
1234}
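/* Usage sketch (assumption, not from the original source): the leaf array is
   kept sorted by uLeaf, which is what makes the binary search above valid.
       PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
       uint32_t uFamilyModelStepping = pLeaf ? pLeaf->uEax : 0;
*/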
1235
1236
1237/**
1238 * Looks up a CPUID leaf in the CPUID leaf array.
1239 *
1240 * @returns Pointer to the leaf if found, NULL if not.
1241 *
1242 * @param pVM The cross context VM structure.
1243 * @param uLeaf The leaf to get.
1244 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1245 * isn't.
1246 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1247 */
1248PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1249{
1250 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1251 if (iEnd)
1252 {
1253 unsigned iStart = 0;
1254 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1255 for (;;)
1256 {
1257 unsigned i = iStart + (iEnd - iStart) / 2U;
1258 if (uLeaf < paLeaves[i].uLeaf)
1259 {
1260 if (i <= iStart)
1261 return NULL;
1262 iEnd = i;
1263 }
1264 else if (uLeaf > paLeaves[i].uLeaf)
1265 {
1266 i += 1;
1267 if (i >= iEnd)
1268 return NULL;
1269 iStart = i;
1270 }
1271 else
1272 {
1273 uSubLeaf &= paLeaves[i].fSubLeafMask;
1274 if (uSubLeaf == paLeaves[i].uSubLeaf)
1275 *pfExactSubLeafHit = true;
1276 else
1277 {
1278 /* Find the right subleaf. We return the last one before
1279 uSubLeaf if we don't find an exact match. */
1280 if (uSubLeaf < paLeaves[i].uSubLeaf)
1281 while ( i > 0
1282 && uLeaf == paLeaves[i - 1].uLeaf
1283 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1284 i--;
1285 else
1286 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1287 && uLeaf == paLeaves[i + 1].uLeaf
1288 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1289 i++;
1290 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1291 }
1292 return &paLeaves[i];
1293 }
1294 }
1295 }
1296
1297 *pfExactSubLeafHit = false;
1298 return NULL;
1299}
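/* Illustrative note (assumption): when the requested sub-leaf is beyond the
   last one stored for a multi-sub-leaf leaf, the search above falls back to
   the nearest existing sub-leaf at or below the (masked) request and reports
   *pfExactSubLeafHit = false rather than returning NULL. */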
1300
1301
1302/**
1303 * Gets a CPUID leaf.
1304 *
1305 * @param pVCpu The cross context virtual CPU structure.
1306 * @param uLeaf The CPUID leaf to get.
1307 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1308 * @param pEax Where to store the EAX value.
1309 * @param pEbx Where to store the EBX value.
1310 * @param pEcx Where to store the ECX value.
1311 * @param pEdx Where to store the EDX value.
1312 */
1313VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1314 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1315{
1316 bool fExactSubLeafHit;
1317 PVM pVM = pVCpu->CTX_SUFF(pVM);
1318 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1319 if (pLeaf)
1320 {
1321 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1322 if (fExactSubLeafHit)
1323 {
1324 *pEax = pLeaf->uEax;
1325 *pEbx = pLeaf->uEbx;
1326 *pEcx = pLeaf->uEcx;
1327 *pEdx = pLeaf->uEdx;
1328
1329 /*
1330 * Deal with CPU specific information.
1331 */
1332 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1333 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1334 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1335 {
1336 if (uLeaf == 1)
1337 {
1338 /* EBX: Bits 31-24: Initial APIC ID. */
1339 Assert(pVCpu->idCpu <= 255);
1340 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1341 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1342
1343 /* EDX: Bit 9: AND with APICBASE.EN. */
1344 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1345 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1346
1347 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1348 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1349 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1350 }
1351 else if (uLeaf == 0xb)
1352 {
1353 /* EDX: Initial extended APIC ID. */
1354 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1355 *pEdx = pVCpu->idCpu;
1356 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1357 }
1358 else if (uLeaf == UINT32_C(0x8000001e))
1359 {
1360 /* EAX: Initial extended APIC ID. */
1361 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1362 *pEax = pVCpu->idCpu;
1363 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1364 }
1365 else if (uLeaf == UINT32_C(0x80000001))
1366 {
1367 /* EDX: Bit 9: AND with APICBASE.EN. */
1368 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1369 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1370 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1371 }
1372 else
1373 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1374 }
1375 }
1376 /*
1377 * Out-of-range sub-leaves aren't quite as easy or pretty to emulate as the
1378 * in-range ones above, but we do the best we can here...
1379 */
1380 else
1381 {
1382 *pEax = *pEbx = *pEcx = *pEdx = 0;
1383 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1384 {
1385 *pEcx = uSubLeaf & 0xff;
1386 *pEdx = pVCpu->idCpu;
1387 }
1388 }
1389 }
1390 else
1391 {
1392 /*
1393 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1394 */
1395 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1396 {
1397 default:
1398 AssertFailed();
1399 RT_FALL_THRU();
1400 case CPUMUNKNOWNCPUID_DEFAULTS:
1401 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1402 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1403 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1404 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1405 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1406 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1407 break;
1408 case CPUMUNKNOWNCPUID_PASSTHRU:
1409 *pEax = uLeaf;
1410 *pEbx = 0;
1411 *pEcx = uSubLeaf;
1412 *pEdx = 0;
1413 break;
1414 }
1415 }
1416 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1417}
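/* Illustrative example (assumption, not part of the original source): on VCPU
   #2, a leaf 1 query returns the cached values with EBX bits 31-24 replaced
   by the APIC ID 2, ECX bit 27 (OSXSAVE) mirroring guest CR4.OSXSAVE, and EDX
   bit 9 (APIC) cleared while the APIC feature is hidden. */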
1418
1419
1420/**
1421 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1422 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1423 *
1424 * @returns Previous value.
1425 * @param pVCpu The cross context virtual CPU structure to make the
1426 * change on. Usually the calling EMT.
1427 * @param fVisible Whether to make it visible (true) or hide it (false).
1428 *
1429 * @remarks This is "VMMDECL" so that it still links with
1430 * the old APIC code which is in VBoxDD2 and not in
1431 * the VMM module.
1432 */
1433VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1434{
1435 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1436 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1437
1438#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1439 /*
1440 * Patch manager saved state legacy pain.
1441 */
1442 PVM pVM = pVCpu->CTX_SUFF(pVM);
1443 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1444 if (pLeaf)
1445 {
1446 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1447 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1448 else
1449 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1450 }
1451
1452 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1453 if (pLeaf)
1454 {
1455 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1456 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1457 else
1458 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1459 }
1460#endif
1461
1462 return fOld;
1463}
1464
1465
1466/**
1467 * Gets the host CPU vendor.
1468 *
1469 * @returns CPU vendor.
1470 * @param pVM The cross context VM structure.
1471 */
1472VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1473{
1474 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1475}
1476
1477
1478/**
1479 * Gets the CPU vendor.
1480 *
1481 * @returns CPU vendor.
1482 * @param pVM The cross context VM structure.
1483 */
1484VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1485{
1486 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1487}
1488
1489
1490VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1491{
1492 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1493 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1494}
1495
1496
1497VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1498{
1499 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1500 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1501}
1502
1503
1504VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1505{
1506 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1507 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1508}
1509
1510
1511VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1512{
1513 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1514 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1515}
1516
1517
1518VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1519{
1520 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1521 return VINF_SUCCESS; /* No need to recalc. */
1522}
1523
1524
1525VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1526{
1527 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1528 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1529}
1530
1531
1532VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1533{
1534 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1535 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1536 if (iReg == 4 || iReg == 5)
1537 iReg += 2;
1538 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1539 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1540}
1541
1542
1543/**
1544 * Recalculates the hypervisor DRx register values based on current guest
1545 * registers and DBGF breakpoints, updating changed registers depending on the
1546 * context.
1547 *
1548 * This is called whenever a guest DRx register is modified (any context) and
1549 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1550 *
1551 * In raw-mode context this function will reload any (hyper) DRx registers which
1552 * come out with a different value. It may also have to save the host debug
1553 * registers if that hasn't been done already. In this context though, we'll
1554 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1555 * are only important when breakpoints are actually enabled.
1556 *
1557 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1558 * reloaded by the HM code if it changes. Furthermore, we will only use the
1559 * combined register set when the VBox debugger is actually using hardware BPs,
1560 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1561 * concern us here).
1562 *
1563 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1564 * all the time.
1565 *
1566 * @returns VINF_SUCCESS.
1567 * @param pVCpu The cross context virtual CPU structure.
1568 * @param iGstReg The guest debug register number that was modified.
1569 * UINT8_MAX if not a guest register.
1570 * @param fForceHyper Used in HM to force hyper registers because of single
1571 * stepping.
1572 */
1573VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1574{
1575 PVM pVM = pVCpu->CTX_SUFF(pVM);
1576#ifndef IN_RING0
1577 RT_NOREF_PV(iGstReg);
1578#endif
1579
1580 /*
1581 * Compare the DR7s first.
1582 *
1583 * We only care about the enabled flags. GD is virtualized when we
1584 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1585 * always have the LE and GE bits set, so no need to check and disable
1586 * stuff if they're cleared like we have to for the guest DR7.
1587 */
1588 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1589 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1590 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1591 uGstDr7 = 0;
1592 else if (!(uGstDr7 & X86_DR7_LE))
1593 uGstDr7 &= ~X86_DR7_LE_ALL;
1594 else if (!(uGstDr7 & X86_DR7_GE))
1595 uGstDr7 &= ~X86_DR7_GE_ALL;
1596
1597 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1598
1599#ifdef IN_RING0
1600 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1601 fForceHyper = true;
1602#endif
1603 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1604 & X86_DR7_ENABLED_MASK)
1605 {
1606 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1607#ifdef IN_RC
1608 bool const fRawModeEnabled = true;
1609#elif defined(IN_RING3)
1610 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1611#endif
1612
1613 /*
1614 * Ok, something is enabled. Recalc each of the breakpoints, taking
1615 * the VM debugger ones over the guest ones. In raw-mode context we will
1616 * not allow breakpoints with values inside the hypervisor area.
1617 */
1618 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1619
1620 /* bp 0 */
1621 RTGCUINTREG uNewDr0;
1622 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1623 {
1624 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1625 uNewDr0 = DBGFBpGetDR0(pVM);
1626 }
1627 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1628 {
1629 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1630#ifndef IN_RING0
1631 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1632 uNewDr0 = 0;
1633 else
1634#endif
1635 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1636 }
1637 else
1638 uNewDr0 = 0;
1639
1640 /* bp 1 */
1641 RTGCUINTREG uNewDr1;
1642 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1643 {
1644 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1645 uNewDr1 = DBGFBpGetDR1(pVM);
1646 }
1647 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1648 {
1649 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1650#ifndef IN_RING0
1651 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1652 uNewDr1 = 0;
1653 else
1654#endif
1655 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1656 }
1657 else
1658 uNewDr1 = 0;
1659
1660 /* bp 2 */
1661 RTGCUINTREG uNewDr2;
1662 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1663 {
1664 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1665 uNewDr2 = DBGFBpGetDR2(pVM);
1666 }
1667 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1668 {
1669 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1670#ifndef IN_RING0
1671 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1672 uNewDr2 = 0;
1673 else
1674#endif
1675 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1676 }
1677 else
1678 uNewDr2 = 0;
1679
1680 /* bp 3 */
1681 RTGCUINTREG uNewDr3;
1682 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1683 {
1684 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1685 uNewDr3 = DBGFBpGetDR3(pVM);
1686 }
1687 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1688 {
1689 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1690#ifndef IN_RING0
1691 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1692 uNewDr3 = 0;
1693 else
1694#endif
1695 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1696 }
1697 else
1698 uNewDr3 = 0;
1699
1700 /*
1701 * Apply the updates.
1702 */
1703#ifdef IN_RC
1704 /* Make sure to save host registers first. */
1705 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1706 {
1707 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1708 {
1709 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1710 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1711 }
1712 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1713 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1714 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1715 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1716 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1717
1718 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1719 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1720 ASMSetDR0(uNewDr0);
1721 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1722 ASMSetDR1(uNewDr1);
1723 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1724 ASMSetDR2(uNewDr2);
1725 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1726 ASMSetDR3(uNewDr3);
1727 ASMSetDR6(X86_DR6_INIT_VAL);
1728 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1729 ASMSetDR7(uNewDr7);
1730 }
1731 else
1732#endif
1733 {
1734 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1735 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1736 CPUMSetHyperDR3(pVCpu, uNewDr3);
1737 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1738 CPUMSetHyperDR2(pVCpu, uNewDr2);
1739 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1740 CPUMSetHyperDR1(pVCpu, uNewDr1);
1741 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1742 CPUMSetHyperDR0(pVCpu, uNewDr0);
1743 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1744 CPUMSetHyperDR7(pVCpu, uNewDr7);
1745 }
1746 }
1747#ifdef IN_RING0
1748 else if (CPUMIsGuestDebugStateActive(pVCpu))
1749 {
1750 /*
1751 * Reload the register that was modified. Normally this won't happen
1752 * as we won't intercept DRx writes when not having the hyper debug
1753 * state loaded, but in case we do for some reason we'll simply deal
1754 * with it.
1755 */
1756 switch (iGstReg)
1757 {
1758 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1759 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1760 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1761 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1762 default:
1763 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1764 }
1765 }
1766#endif
1767 else
1768 {
1769 /*
1770 * No active debug state any more. In raw-mode this means we have to
1771 * make sure DR7 has everything disabled now, if we armed it already.
1772 * In ring-0 we might end up here when just single stepping.
1773 */
1774#if defined(IN_RC) || defined(IN_RING0)
1775 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1776 {
1777# ifdef IN_RC
1778 ASMSetDR7(X86_DR7_INIT_VAL);
1779# endif
1780 if (pVCpu->cpum.s.Hyper.dr[0])
1781 ASMSetDR0(0);
1782 if (pVCpu->cpum.s.Hyper.dr[1])
1783 ASMSetDR1(0);
1784 if (pVCpu->cpum.s.Hyper.dr[2])
1785 ASMSetDR2(0);
1786 if (pVCpu->cpum.s.Hyper.dr[3])
1787 ASMSetDR3(0);
1788 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1789 }
1790#endif
1791 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1792
1793 /* Clear all the registers. */
1794 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1795 pVCpu->cpum.s.Hyper.dr[3] = 0;
1796 pVCpu->cpum.s.Hyper.dr[2] = 0;
1797 pVCpu->cpum.s.Hyper.dr[1] = 0;
1798 pVCpu->cpum.s.Hyper.dr[0] = 0;
1799
1800 }
1801 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1802 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1803 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1804 pVCpu->cpum.s.Hyper.dr[7]));
1805
1806 return VINF_SUCCESS;
1807}
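/* Worked example (assumption, not from the original source): if DBGF owns
   hardware breakpoint 0 (L0 set in uDbgfDr7) while the guest has armed
   breakpoint 1 (L1 set in uGstDr7), the merged uNewDr7 takes DBGF's
   L0/RW0/LEN0 bits and the guest's L1/RW1/LEN1 bits plus LE, GE and the RA1
   mask, with uNewDr0 coming from DBGFBpGetDR0() and uNewDr1 from
   CPUMGetGuestDR1(). */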
1808
1809
1810/**
1811 * Set the guest XCR0 register.
1812 *
1813 * Will load additional state if the FPU state is already loaded (in ring-0 &
1814 * raw-mode context).
1815 *
1816 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1817 * value.
1818 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1819 * @param uNewValue The new value.
1820 * @thread EMT(pVCpu)
1821 */
1822VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1823{
1824 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1825 /* The X87 bit cannot be cleared. */
1826 && (uNewValue & XSAVE_C_X87)
1827 /* AVX requires SSE. */
1828 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1829 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1830 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1831 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1832 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1833 )
1834 {
1835 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1836
1837 /* If more state components are enabled, we need to take care to load
1838 them if the FPU/SSE state is already loaded. May otherwise leak
1839 host state to the guest. */
1840 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1841 if (fNewComponents)
1842 {
1843#if defined(IN_RING0) || defined(IN_RC)
1844 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1845 {
1846 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1847 /* Adding more components. */
1848 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1849 else
1850 {
1851 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1852 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1853 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1854 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1855 }
1856 }
1857#endif
1858 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1859 }
1860 return VINF_SUCCESS;
1861 }
1862 return VERR_CPUM_RAISE_GP_0;
1863}
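/* Illustrative examples of the validation above (assumption):
     XSAVE_C_X87                              -> VINF_SUCCESS
     XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM  -> VINF_SUCCESS if AVX is in fXStateGuestMask
     XSAVE_C_X87 | XSAVE_C_YMM                -> VERR_CPUM_RAISE_GP_0 (YMM without SSE)
     0                                        -> VERR_CPUM_RAISE_GP_0 (the X87 bit may not be cleared) */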
1864
1865
1866/**
1867 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1868 *
1869 * @returns true if NXE is enabled, otherwise false.
1870 * @param pVCpu The cross context virtual CPU structure.
1871 */
1872VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1873{
1874 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1875}
1876
1877
1878/**
1879 * Tests if the guest has the Page Size Extension enabled (PSE).
1880 *
1881 * @returns true if PSE is enabled, otherwise false.
1882 * @param pVCpu The cross context virtual CPU structure.
1883 */
1884VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1885{
1886 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1887 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1888}
1889
1890
1891/**
1892 * Tests if the guest has paging enabled (PG).
1893 *
1894 * @returns true if paging is enabled, otherwise false.
1895 * @param pVCpu The cross context virtual CPU structure.
1896 */
1897VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1898{
1899 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1900}
1901
1902
1903/**
1904 * Tests if the guest has write protection of supervisor-mode pages enabled (WP).
1905 *
1906 * @returns true if WP is enabled, otherwise false.
1907 * @param pVCpu The cross context virtual CPU structure.
1908 */
1909VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1910{
1911 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1912}
1913
1914
1915/**
1916 * Tests if the guest is running in real mode or not.
1917 *
1918 * @returns true if in real mode, otherwise false.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1922{
1923 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1924}
1925
1926
1927/**
1928 * Tests if the guest is running in real or virtual 8086 mode.
1929 *
1930 * @returns @c true if it is, @c false if not.
1931 * @param pVCpu The cross context virtual CPU structure.
1932 */
1933VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1934{
1935 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1936 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1937}
1938
1939
1940/**
1941 * Tests if the guest is running in protected mode or not.
1942 *
1943 * @returns true if in protected mode, otherwise false.
1944 * @param pVCpu The cross context virtual CPU structure.
1945 */
1946VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1947{
1948 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1949}
1950
1951
1952/**
1953 * Tests if the guest is running in paged protected mode or not.
1954 *
1955 * @returns true if in paged protected mode, otherwise false.
1956 * @param pVCpu The cross context virtual CPU structure.
1957 */
1958VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1959{
1960 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1961}
1962
1963
1964/**
1965 * Tests if the guest is running in long mode or not.
1966 *
1967 * @returns true if in long mode, otherwise false.
1968 * @param pVCpu The cross context virtual CPU structure.
1969 */
1970VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1971{
1972 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1973}
1974
1975
1976/**
1977 * Tests if the guest is running in PAE mode or not.
1978 *
1979 * @returns true if in PAE mode, otherwise false.
1980 * @param pVCpu The cross context virtual CPU structure.
1981 */
1982VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1983{
1984 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1985 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1986 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1987 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1988 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1989}
1990
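/*
 * Illustrative sketch (not part of the original file): how the paging-mode
 * predicates above map onto the number of guest page-table levels.  The
 * helper name is hypothetical; PGM has the authoritative logic.
 */
DECLINLINE(unsigned) cpumSketchGuestPagingLevels(PVMCPU pVCpu)
{
    if (!CPUMIsGuestPagingEnabled(pVCpu))
        return 0;   /* CR0.PG clear - no paging. */
    if (CPUMIsGuestInLongMode(pVCpu))
        return 4;   /* Long mode: PML4 -> PDPT -> PD -> PT. */
    if (CPUMIsGuestInPAEMode(pVCpu))
        return 3;   /* PAE: PDPT -> PD -> PT. */
    return 2;       /* Legacy 32-bit paging: PD -> PT. */
}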
1991
1992/**
1993 * Tests if the guest is running in 64-bit mode or not.
1994 *
1995 * @returns true if in 64-bit protected mode, otherwise false.
1996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1997 */
1998VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1999{
2000 if (!CPUMIsGuestInLongMode(pVCpu))
2001 return false;
2002 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2003 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2004}
2005
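/*
 * Illustrative note (not part of the original file): long mode (EFER.LMA) does
 * not imply 64-bit code; compatibility-mode segments have CS.L clear.  A
 * hypothetical helper for detecting compatibility mode could thus look like:
 */
DECLINLINE(bool) cpumSketchIsGuestInCompatMode(PVMCPU pVCpu)
{
    return CPUMIsGuestInLongMode(pVCpu)     /* EFER.LMA is set... */
        && !CPUMIsGuestIn64BitCode(pVCpu);  /* ...but CS.L is clear (16/32-bit code). */
}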
2006
2007/**
2008 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2009 * registers.
2010 *
2011 * @returns true if in 64-bit protected mode, otherwise false.
2012 * @param pCtx Pointer to the current guest CPU context.
2013 */
2014VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2015{
2016 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2017}
2018
2019#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2020
2021/**
2022 * Checks whether we've entered raw-mode.
2023 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2024 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2025 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2026 */
2027VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2028{
2029 return pVCpu->cpum.s.fRawEntered;
2030}
2031
2032/**
2033 * Transforms the guest CPU state to raw-ring mode.
2034 *
2035 * This function will change any of the cs and ss selectors with RPL=0 to RPL=1.
2036 *
2037 * @returns VBox status code. (recompiler failure)
2038 * @param pVCpu The cross context virtual CPU structure.
2039 * @see @ref pg_raw
2040 */
2041VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2042{
2043 PVM pVM = pVCpu->CTX_SUFF(pVM);
2044
2045 Assert(!pVCpu->cpum.s.fRawEntered);
2046 Assert(!pVCpu->cpum.s.fRemEntered);
2047 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2048
2049 /*
2050 * Are we in Ring-0?
2051 */
2052 if ( pCtx->ss.Sel
2053 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2054 && !pCtx->eflags.Bits.u1VM)
2055 {
2056 /*
2057 * Enter execution mode.
2058 */
2059 PATMRawEnter(pVM, pCtx);
2060
2061 /*
2062 * Set CPL to Ring-1.
2063 */
2064 pCtx->ss.Sel |= 1;
2065 if ( pCtx->cs.Sel
2066 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2067 pCtx->cs.Sel |= 1;
2068 }
2069 else
2070 {
2071# ifdef VBOX_WITH_RAW_RING1
2072 if ( EMIsRawRing1Enabled(pVM)
2073 && !pCtx->eflags.Bits.u1VM
2074 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2075 {
2076 /* Set CPL to Ring-2. */
2077 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2078 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2079 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2080 }
2081# else
2082 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2083 ("ring-1 code not supported\n"));
2084# endif
2085 /*
2086 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2087 */
2088 PATMRawEnter(pVM, pCtx);
2089 }
2090
2091 /*
2092 * Assert sanity.
2093 */
2094 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2095 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2096 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2097 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2098
2099 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2100
2101 pVCpu->cpum.s.fRawEntered = true;
2102 return VINF_SUCCESS;
2103}
2104
2105
2106/**
2107 * Transforms the guest CPU state from raw-ring mode to correct values.
2108 *
2109 * This function will change any selector registers with RPL=1 back to RPL=0.
2110 *
2111 * @returns Adjusted rc.
2112 * @param pVCpu The cross context virtual CPU structure.
2113 * @param rc Raw mode return code
2114 * @see @ref pg_raw
2115 */
2116VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2117{
2118 PVM pVM = pVCpu->CTX_SUFF(pVM);
2119
2120 /*
2121 * Don't leave if we've already left (in RC).
2122 */
2123 Assert(!pVCpu->cpum.s.fRemEntered);
2124 if (!pVCpu->cpum.s.fRawEntered)
2125 return rc;
2126 pVCpu->cpum.s.fRawEntered = false;
2127
2128 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2129 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2130 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2131 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2132
2133 /*
2134 * Are we executing in raw ring-1?
2135 */
2136 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2137 && !pCtx->eflags.Bits.u1VM)
2138 {
2139 /*
2140 * Leave execution mode.
2141 */
2142 PATMRawLeave(pVM, pCtx, rc);
2143 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2144 /** @todo See what happens if we remove this. */
2145 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2146 pCtx->ds.Sel &= ~X86_SEL_RPL;
2147 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2148 pCtx->es.Sel &= ~X86_SEL_RPL;
2149 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2150 pCtx->fs.Sel &= ~X86_SEL_RPL;
2151 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2152 pCtx->gs.Sel &= ~X86_SEL_RPL;
2153
2154 /*
2155 * Ring-1 selector => Ring-0.
2156 */
2157 pCtx->ss.Sel &= ~X86_SEL_RPL;
2158 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2159 pCtx->cs.Sel &= ~X86_SEL_RPL;
2160 }
2161 else
2162 {
2163 /*
2164 * PATM is taking care of the IOPL and IF flags for us.
2165 */
2166 PATMRawLeave(pVM, pCtx, rc);
2167 if (!pCtx->eflags.Bits.u1VM)
2168 {
2169# ifdef VBOX_WITH_RAW_RING1
2170 if ( EMIsRawRing1Enabled(pVM)
2171 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2172 {
2173 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2174 /** @todo See what happens if we remove this. */
2175 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2176 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2177 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2178 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2179 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2180 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2181 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2182 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2183
2184 /*
2185 * Ring-2 selector => Ring-1.
2186 */
2187 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2188 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2189 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2190 }
2191 else
2192 {
2193# endif
2194 /** @todo See what happens if we remove this. */
2195 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2196 pCtx->ds.Sel &= ~X86_SEL_RPL;
2197 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2198 pCtx->es.Sel &= ~X86_SEL_RPL;
2199 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2200 pCtx->fs.Sel &= ~X86_SEL_RPL;
2201 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2202 pCtx->gs.Sel &= ~X86_SEL_RPL;
2203# ifdef VBOX_WITH_RAW_RING1
2204 }
2205# endif
2206 }
2207 }
2208
2209 return rc;
2210}
2211
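/*
 * Usage sketch (not part of the original file): the raw-mode execution loop
 * pairs these two calls around the actual guest execution, roughly:
 *
 *     rc = CPUMRawEnter(pVCpu);
 *     ... execute guest code in raw-mode / the recompiler ...
 *     rc = CPUMRawLeave(pVCpu, rc);
 *
 * The real call sites live in EM/REM and differ in detail.
 */
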
2212#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2213
2214/**
2215 * Updates the EFLAGS while we're in raw-mode.
2216 *
2217 * @param pVCpu The cross context virtual CPU structure.
2218 * @param fEfl The new EFLAGS value.
2219 */
2220VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2221{
2222#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2223 if (pVCpu->cpum.s.fRawEntered)
2224 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2225 else
2226#endif
2227 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2228}
2229
2230
2231/**
2232 * Gets the EFLAGS while we're in raw-mode.
2233 *
2234 * @returns The eflags.
2235 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2236 */
2237VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2238{
2239#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2240 if (pVCpu->cpum.s.fRawEntered)
2241 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2242#endif
2243 return pVCpu->cpum.s.Guest.eflags.u32;
2244}
2245
2246
2247/**
2248 * Sets the specified changed flags (CPUM_CHANGED_*).
2249 *
2250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2251 * @param fChangedAdd The changed flags to add.
2252 */
2253VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2254{
2255 pVCpu->cpum.s.fChanged |= fChangedAdd;
2256}
2257
2258
2259/**
2260 * Checks if the host CPU supports the XSAVE and XRSTOR instructions.
2261 *
2262 * @returns true if supported.
2263 * @returns false if not supported.
2264 * @param pVM The cross context VM structure.
2265 */
2266VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2267{
2268 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2269}
2270
2271
2272/**
2273 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2274 * @returns true if used.
2275 * @returns false if not used.
2276 * @param pVM The cross context VM structure.
2277 */
2278VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2279{
2280 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2281}
2282
2283
2284/**
2285 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2286 * @returns true if used.
2287 * @returns false if not used.
2288 * @param pVM The cross context VM structure.
2289 */
2290VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2291{
2292 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2293}
2294
2295#ifdef IN_RC
2296
2297/**
2298 * Lazily sync in the FPU/XMM state.
2299 *
2300 * @returns VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure.
2302 */
2303VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2304{
2305 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2306}
2307
2308#endif /* IN_RC */
2309
2310/**
2311 * Checks if we activated the FPU/XMM state of the guest OS.
2312 *
2313 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2314 * time we'll be executing guest code, so it may return true for 64-on-32 when
2315 * we still haven't actually loaded the FPU state, just scheduled it to be
2316 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2317 *
2318 * @returns true / false.
2319 * @param pVCpu The cross context virtual CPU structure.
2320 */
2321VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2322{
2323 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2324}
2325
2326
2327/**
2328 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2329 *
2330 * @returns true / false.
2331 * @param pVCpu The cross context virtual CPU structure.
2332 */
2333VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2334{
2335 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2336}
2337
2338
2339/**
2340 * Checks if we saved the FPU/XMM state of the host OS.
2341 *
2342 * @returns true / false.
2343 * @param pVCpu The cross context virtual CPU structure.
2344 */
2345VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2346{
2347 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2348}
2349
2350
2351/**
2352 * Checks if the guest debug state is active.
2353 *
2354 * @returns boolean
2355 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2356 */
2357VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2358{
2359 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2360}
2361
2362
2363/**
2364 * Checks if the guest debug state is to be made active during the world-switch
2365 * (currently only used for the 32->64 switcher case).
2366 *
2367 * @returns boolean
2368 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2369 */
2370VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2371{
2372 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2373}
2374
2375
2376/**
2377 * Checks if the hyper debug state is active.
2378 *
2379 * @returns boolean
2380 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2381 */
2382VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2383{
2384 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2385}
2386
2387
2388/**
2389 * Checks if the hyper debug state is to be made active during the world-switch
2390 * (currently only used for the 32->64 switcher case).
2391 *
2392 * @returns boolean
2393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2394 */
2395VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2396{
2397 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2398}
2399
2400
2401/**
2402 * Mark the guest's debug state as inactive.
2403 *
2405 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2406 * @todo This API doesn't make sense any more.
2407 */
2408VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2409{
2410 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2411 NOREF(pVCpu);
2412}
2413
2414
2415/**
2416 * Get the current privilege level of the guest.
2417 *
2418 * @returns CPL
2419 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2420 */
2421VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2422{
2423 /*
2424 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2425 *
2426 * Note! We used to check CS.DPL here, assuming it was always equal to
2427 * CPL even if a conforming segment was loaded. But this turned out to
2428 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2429 * during install after a far call to ring 2. Then on newer
2430 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2431 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2432 *
2433 * So, forget CS.DPL, always use SS.DPL.
2434 *
2435 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2436 * isn't necessarily equal if the segment is conforming.
2437 * See section 4.11.1 in the AMD manual.
2438 *
2439 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2440 * right after real->prot mode switch and when in V8086 mode? That
2441 * section says the RPL specified in a direct transfer (call, jmp,
2442 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2443 * it would be impossible for an exception handler or the iret
2444 * instruction to figure out whether SS:ESP are part of the frame
2445 * or not. A VBox or QEMU bug must've led to this misconception.
2446 *
2447 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2448 * selector into SS with an RPL other than the CPL when CPL != 3 and
2449 * we're in 64-bit mode. The Intel dev box doesn't allow this; only
2450 * RPL = CPL is accepted. Weird.
2451 */
2452 uint32_t uCpl;
2453 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2454 {
2455 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2456 {
2457 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2458 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2459 else
2460 {
2461 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2462#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2463# ifdef VBOX_WITH_RAW_RING1
2464 if (pVCpu->cpum.s.fRawEntered)
2465 {
2466 if ( uCpl == 2
2467 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2468 uCpl = 1;
2469 else if (uCpl == 1)
2470 uCpl = 0;
2471 }
2472 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2473# else
2474 if (uCpl == 1)
2475 uCpl = 0;
2476# endif
2477#endif
2478 }
2479 }
2480 else
2481 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2482 }
2483 else
2484 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2485 return uCpl;
2486}
2487
2488
2489/**
2490 * Gets the current guest CPU mode.
2491 *
2492 * If paging mode is what you need, check out PGMGetGuestMode().
2493 *
2494 * @returns The CPU mode.
2495 * @param pVCpu The cross context virtual CPU structure.
2496 */
2497VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2498{
2499 CPUMMODE enmMode;
2500 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2501 enmMode = CPUMMODE_REAL;
2502 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2503 enmMode = CPUMMODE_PROTECTED;
2504 else
2505 enmMode = CPUMMODE_LONG;
2506
2507 return enmMode;
2508}
2509
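/*
 * Usage sketch (not part of the original file): consumers typically switch on
 * the returned mode; the logging below is purely illustrative.
 */
static void cpumSketchLogGuestMode(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      Log(("Guest is in real mode\n"));      break;
        case CPUMMODE_PROTECTED: Log(("Guest is in protected mode\n")); break;
        case CPUMMODE_LONG:      Log(("Guest is in long mode\n"));      break;
        default:                 AssertFailed();                        break;
    }
}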
2510
2511/**
2512 * Figures out whether the guest CPU is currently executing 16-, 32- or 64-bit code.
2513 *
2514 * @returns 16, 32 or 64.
2515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2516 */
2517VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2518{
2519 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2520 return 16;
2521
2522 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2523 {
2524 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2525 return 16;
2526 }
2527
2528 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2529 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2530 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2531 return 64;
2532
2533 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2534 return 32;
2535
2536 return 16;
2537}
2538
2539
2540VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2541{
2542 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2543 return DISCPUMODE_16BIT;
2544
2545 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2546 {
2547 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2548 return DISCPUMODE_16BIT;
2549 }
2550
2551 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2552 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2553 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2554 return DISCPUMODE_64BIT;
2555
2556 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2557 return DISCPUMODE_32BIT;
2558
2559 return DISCPUMODE_16BIT;
2560}
2561
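/*
 * Illustrative note (not part of the original file): CPUMGetGuestDisMode() is
 * simply the DISCPUMODE flavour of CPUMGetGuestCodeBits(); a hypothetical
 * mapping helper makes the relationship explicit.
 */
DECLINLINE(DISCPUMODE) cpumSketchCodeBitsToDisMode(uint32_t cBits)
{
    switch (cBits)
    {
        case 64:    return DISCPUMODE_64BIT;
        case 32:    return DISCPUMODE_32BIT;
        default:    return DISCPUMODE_16BIT;   /* 16-bit as well as real/V86 code. */
    }
}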
2562
2563/**
2564 * Gets the guest MXCSR_MASK value.
2565 *
2566 * This does not access the x87 state, but rather returns the value we
2567 * determined at VM initialization.
2568 *
2569 * @returns MXCSR mask.
2570 * @param pVM The cross context VM structure.
2571 */
2572VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2573{
2574 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2575}
2576
2577
2578/**
2579 * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
2580 * interrupts.
2581 *
2582 * @returns true if it's ready to receive physical interrupts, false otherwise.
2584 *
2585 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2586 * @param pCtx The guest-CPU context.
2587 *
2588 * @sa hmR0SvmCanNstGstTakePhysIntr.
2589 */
2590VMM_INT_DECL(bool) CPUMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2591{
2592 /** @todo Optimization: Avoid this function call and use a pointer to the
2593 * relevant eflags instead (setup during VMRUN instruction emulation). */
2594#ifdef IN_RC
2595 RT_NOREF2(pVCpu, pCtx);
2596 AssertReleaseFailedReturn(false);
2597#else
2598 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2599 Assert(pCtx->hwvirt.fGif);
2600
2601 X86EFLAGS fEFlags;
2602 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2603 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2604 else
2605 fEFlags.u = pCtx->eflags.u;
2606
2607 return fEFlags.Bits.u1IF;
2608#endif
2609}
2610
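/*
 * Usage sketch (not part of the original file): a caller deciding whether a
 * pending PIC/APIC interrupt can be delivered to the SVM nested-guest would
 * check the global interrupt flag first and only then ask this function.
 * The helper name is hypothetical.
 */
static bool cpumSketchCanInjectPhysIntrToNstGst(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    if (!pCtx->hwvirt.fGif)                           /* GIF clear blocks all interrupts. */
        return false;
    return CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx); /* Honours V_INTR_MASKING and EFLAGS.IF. */
}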
2611
2612/**
2613 * Checks whether the SVM nested-guest is in a state to receive virtual
2614 * interrupts (set up for injection by the VMRUN instruction).
2615 *
2616 * @returns true if it's ready to receive virtual interrupts, false otherwise.
2618 *
2619 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2620 * @param pCtx The guest-CPU context.
2621 */
2622VMM_INT_DECL(bool) CPUMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2623{
2624#ifdef IN_RC
2625 RT_NOREF2(pVCpu, pCtx);
2626 AssertReleaseFailedReturn(false);
2627#else
2628 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2629 Assert(pCtx->hwvirt.fGif);
2630
2631 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2632 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2633 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2634 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2635 return false;
2636
2637 X86EFLAGS fEFlags;
2638 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2639 fEFlags.u = pCtx->eflags.u;
2640 else
2641 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2642
2643 return fEFlags.Bits.u1IF;
2644#endif
2645}
2646
2647
2648/**
2649 * Gets the pending SVM nested-guest interrupt.
2650 *
2651 * @returns The nested-guest interrupt to inject.
2652 * @param pCtx The guest-CPU context.
2653 */
2654VMM_INT_DECL(uint8_t) CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx)
2655{
2656#ifdef IN_RC
2657 RT_NOREF(pCtx);
2658 AssertReleaseFailedReturn(0);
2659#else
2660 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2661 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2662#endif
2663}
2664
2665
2666/**
2667 * Gets the SVM nested-guest virtual GIF.
2668 *
2669 * @returns The nested-guest virtual GIF.
2670 * @param pCtx The guest-CPU context.
2671 */
2672VMM_INT_DECL(bool) CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx)
2673{
2674#ifdef IN_RC
2675 RT_NOREF(pCtx);
2676 AssertReleaseFailedReturn(false);
2677#else
2678 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2679 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2680 if (pVmcbIntCtrl->n.u1VGifEnable)
2681 return pVmcbIntCtrl->n.u1VGif;
2682 return true;
2683#endif
2684}
2685
2686
2687/**
2688 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2689 *
2690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2691 * @param pCtx The guest-CPU context.
2692 */
2693VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2694{
2695 /*
2696 * Reload the guest's "host state".
2697 */
2698 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2699 pCtx->es = pHostState->es;
2700 pCtx->cs = pHostState->cs;
2701 pCtx->ss = pHostState->ss;
2702 pCtx->ds = pHostState->ds;
2703 pCtx->gdtr = pHostState->gdtr;
2704 pCtx->idtr = pHostState->idtr;
2705 CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2706 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2707 pCtx->cr3 = pHostState->uCr3;
2708 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2709 pCtx->rflags = pHostState->rflags;
2710 pCtx->rflags.Bits.u1VM = 0;
2711 pCtx->rip = pHostState->uRip;
2712 pCtx->rsp = pHostState->uRsp;
2713 pCtx->rax = pHostState->uRax;
2714 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2715 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2716 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2717
2718 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2719 * raise \#GP(0) in the guest. */
2720
2721 /** @todo check the loaded host-state for consistency. Figure out what
2722 * exactly this involves? */
2723}
2724
2725
2726/**
2727 * Saves the host-state to the host-state save area as part of a VMRUN.
2728 *
2729 * @param pCtx The guest-CPU context.
2730 * @param cbInstr The length of the VMRUN instruction in bytes.
2731 */
2732VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2733{
2734 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2735 pHostState->es = pCtx->es;
2736 pHostState->cs = pCtx->cs;
2737 pHostState->ss = pCtx->ss;
2738 pHostState->ds = pCtx->ds;
2739 pHostState->gdtr = pCtx->gdtr;
2740 pHostState->idtr = pCtx->idtr;
2741 pHostState->uEferMsr = pCtx->msrEFER;
2742 pHostState->uCr0 = pCtx->cr0;
2743 pHostState->uCr3 = pCtx->cr3;
2744 pHostState->uCr4 = pCtx->cr4;
2745 pHostState->rflags = pCtx->rflags;
2746 pHostState->uRip = pCtx->rip + cbInstr;
2747 pHostState->uRsp = pCtx->rsp;
2748 pHostState->uRax = pCtx->rax;
2749}
2750
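/*
 * Illustrative note (not part of the original file): the two functions above
 * are expected to be used as a pair by the SVM emulation code, roughly:
 *
 *     CPUMSvmVmRunSaveHostState(pCtx, cbInstr);       on VMRUN, before loading
 *                                                     the nested-guest state
 *     CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);     on \#VMEXIT, to return to
 *                                                     the outer guest
 *
 * The actual call sites are in IEM/HM and include additional intercept and
 * consistency handling.
 */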
2751
2752/**
2753 * Applies the TSC offset of a nested-guest if any and returns the new TSC
2754 * value for the guest (or nested-guest).
2755 *
2756 * @returns The TSC value after applying any nested-guest TSC offset.
2757 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2758 * @param uTicks The guest TSC.
2759 *
2760 * @sa HMSvmNstGstApplyTscOffset.
2761 */
2762VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
2763{
2764#ifndef IN_RC
2765 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2766 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2767 {
2768 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
2769 {
2770 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2771 return uTicks + pVmcb->ctrl.u64TSCOffset;
2772 }
2773 return HMSvmNstGstApplyTscOffset(pVCpu, uTicks);
2774 }
2775
2776 /** @todo Intel. */
2777#else
2778 RT_NOREF(pVCpu);
2779#endif
2780 return uTicks;
2781}
2782
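/*
 * Usage sketch (not part of the original file): RDTSC/RDTSCP emulation is the
 * natural consumer of this helper - the raw guest TSC comes from TM and the
 * nested-guest offset is applied on top of it.  The helper name below is
 * hypothetical and ignores the TM plumbing that exists in the real code.
 */
static uint64_t cpumSketchReadNstGstTsc(PVMCPU pVCpu, uint64_t uGuestTsc)
{
    /* uGuestTsc is assumed to be the virtual TSC TM computed for the outer guest. */
    return CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
}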
2783
2784/**
2785 * Used to dynamically import state residing in NEM or HM.
2786 *
2787 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2788 *
2789 * @returns VBox status code.
2790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2791 * @param fExtrnImport The fields to import.
2792 * @thread EMT(pVCpu)
2793 */
2794VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport)
2795{
2796 VMCPU_ASSERT_EMT(pVCpu);
2797 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2798 {
2799#ifndef IN_RC
2800 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2801 {
2802 case CPUMCTX_EXTRN_KEEPER_NEM:
2803 {
2804 int rc = NEMImportStateOnDemand(pVCpu, &pVCpu->cpum.s.Guest, fExtrnImport);
2805 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2806 return rc;
2807 }
2808
2809 case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */
2810 default:
2811 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2812 }
2813#else
2814 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2815#endif
2816 }
2817 return VINF_SUCCESS;
2818}
2819
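/*
 * Usage sketch (not part of the original file): callers normally reach this
 * worker through the CPUM_IMPORT_EXTRN_RET() macro mentioned above.  The
 * fragment below is hypothetical and assumes the macro takes the VCpu and the
 * CPUMCTX_EXTRN_XXX mask to import.
 */
#if 0 /* Example only, not built. */
static int cpumSketchPeekGuestCr0(PVMCPU pVCpu, uint64_t *puCr0)
{
    /* Pull CR0 in from NEM/HM if it is still marked external, then read it. */
    CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0);
    *puCr0 = CPUMGetGuestCR0(pVCpu);
    return VINF_SUCCESS;
}
#endif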