VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@12559

Last change on this file since 12559 was 11704, checked in by vboxsync, 16 years ago

Allow enabling and disabling of the PAT cpuid feature.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.4 KB
/* $Id: CPUMAllRegs.cpp 11704 2008-08-27 14:52:09Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include <VBox/patm.h>
#include <VBox/dbgf.h>
#include <VBox/mm.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>


/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG)
# pragma optimize("y", off)
#endif


/**
 * Sets or resets an alternative hypervisor context core.
 *
 * This is called when we get a hypervisor trap, to switch the context
 * core to the trap frame on the stack. It is called again to reset
 * back to the default context core when resuming hypervisor execution.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    Pointer to the alternative context core or NULL
 *                      to go back to the default context core.
 */
CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVM->cpum.s.pHyperCoreR3, pVM->cpum.s.pHyperCoreR0, pVM->cpum.s.pHyperCoreGC, pCtxCore));
    if (!pCtxCore)
    {
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))VM_GUEST_ADDR(pVM, pCtxCore);
    }
    else
    {
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
    }
}
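
/* Illustrative usage sketch (assumption, not from the original file): a typical
 * caller is a hypervisor trap handler, which switches to the trap frame on
 * entry and restores the default context core before resuming, e.g.:
 *
 *      CPUMHyperSetCtxCore(pVM, pTrapCtxCore);  // pTrapCtxCore: hypothetical trap frame
 *      // ... inspect or adjust registers via CPUMGetHyperEIP(pVM) etc. ...
 *      CPUMHyperSetCtxCore(pVM, NULL);          // back to the default context core
 */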


/**
 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore);
}


/**
 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the hyper CPUMCTX pointer when successful.
 *
 * @deprecated This has never given the right picture of the hypervisor
 *             register state, and with CPUMHyperSetCtxCore() it is getting
 *             worse. So, use the individual functions for getting and
 *             especially for setting the hypervisor registers.
 */
CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Hyper;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVM->cpum.s.Hyper.gdtr.pGdt = addr;
    pVM->cpum.s.Hyper.gdtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.idtr.cbIdt = limit;
    pVM->cpum.s.Hyper.idtr.pIdt = addr;
    pVM->cpum.s.Hyper.idtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
{
    pVM->cpum.s.Hyper.cr3 = cr3;
}

CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs = SelCS;
}

CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds = SelDS;
}

CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->es = SelES;
}

CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs = SelFS;
}

CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs = SelGS;
}

CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss = SelSS;
}

CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp = u32ESP;
}

CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32 = Efl;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip = u32EIP;
}

CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
{
    pVM->cpum.s.Hyper.tr = SelTR;
}

CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
{
    pVM->cpum.s.Hyper.ldtr = SelLDTR;
}

CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
{
    pVM->cpum.s.Hyper.dr0 = uDr0;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
{
    pVM->cpum.s.Hyper.dr1 = uDr1;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
{
    pVM->cpum.s.Hyper.dr2 = uDr2;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
{
    pVM->cpum.s.Hyper.dr3 = uDr3;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
{
    pVM->cpum.s.Hyper.dr6 = uDr6;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
{
    pVM->cpum.s.Hyper.dr7 = uDr7;
    /** @todo in GC we must load it! */
}


CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs;
}

CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds;
}

CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->es;
}

CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs;
}

CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs;
}

CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss;
}

#if 0 /* these are not correct. */

CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr0;
}

CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr2;
}

CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr3;
}

CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr4;
}

#endif /* not correct */

CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eax;
}

CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebx;
}

CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ecx;
}

CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edx;
}

CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esi;
}

CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edi;
}

CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebp;
}

CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp;
}

CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32;
}

CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip;
}

CPUMDECL(uint64_t) CPUMGetHyperRIP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->rip;
}

CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
    return pVM->cpum.s.Hyper.idtr.pIdt;
}

CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
    return pVM->cpum.s.Hyper.gdtr.pGdt;
}

CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
{
    return pVM->cpum.s.Hyper.ldtr;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr0;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr1;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr2;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr3;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr6;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr7;
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
{
    return CPUMCTX2CORE(&pVM->cpum.s.Guest);
}


/**
 * Sets the guest context core registers.
 *
 * @param   pVM         Handle to the virtual machine.
 * @param   pCtxCore    The new context core values.
 */
CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
{
    /** @todo #1410 requires selectors to be checked. */

    PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->cpum.s.Guest);
    *pCtxCoreDst = *pCtxCore;

    /* Mask away invalid parts of the cpu context. */
    if (!CPUMIsGuestInLongMode(pVM))
    {
        uint64_t u64Mask = UINT64_C(0xffffffff);

        pCtxCoreDst->rip &= u64Mask;
        pCtxCoreDst->rax &= u64Mask;
        pCtxCoreDst->rbx &= u64Mask;
        pCtxCoreDst->rcx &= u64Mask;
        pCtxCoreDst->rdx &= u64Mask;
        pCtxCoreDst->rsi &= u64Mask;
        pCtxCoreDst->rdi &= u64Mask;
        pCtxCoreDst->rbp &= u64Mask;
        pCtxCoreDst->rsp &= u64Mask;
        pCtxCoreDst->rflags.u &= u64Mask;

        pCtxCoreDst->r8  = 0;
        pCtxCoreDst->r9  = 0;
        pCtxCoreDst->r10 = 0;
        pCtxCoreDst->r11 = 0;
        pCtxCoreDst->r12 = 0;
        pCtxCoreDst->r13 = 0;
        pCtxCoreDst->r14 = 0;
        pCtxCoreDst->r15 = 0;
    }
}
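
/* Illustrative usage sketch (assumption, not from the original file): because
 * the high halves are masked above, a caller can snapshot the core, patch the
 * 32-bit fields it cares about and write it back without scrubbing the rest:
 *
 *      CPUMCTXCORE Core = *CPUMGetGuestCtxCore(pVM);   // copy the current core
 *      Core.eip = uNewEip;                             // uNewEip: hypothetical value
 *      CPUMSetGuestCtxCore(pVM, &Core);                // rip/rax & co. get masked
 */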


/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the CPUMCTX pointer when successful.
 */
CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Guest;
    return VINF_SUCCESS;
}


CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.gdtr.cbGdt = limit;
    pVM->cpum.s.Guest.gdtr.pGdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.idtr.cbIdt = limit;
    pVM->cpum.s.Guest.idtr.pIdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
{
    pVM->cpum.s.Guest.tr = tr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
{
    pVM->cpum.s.Guest.ldtr = ldtr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS;
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVM     Pointer to the shared VM structure.
 * @param   cr0     The new CR0 value.
 */
CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
{
#ifdef IN_GC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0                   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVM->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MP are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVM->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
#ifdef VBOX_STRICT
            else
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
#endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(   (HyperCR0                 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      == (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVM->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif /* IN_GC */

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0                   & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}
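
/* Illustrative note (assumption, not from the original file): per the doc
 * comment above, the caller still owns the PG/WP/PE follow-up, e.g. a MOV CR0
 * emulation path might look like:
 *
 *      uint64_t const cr0Old = CPUMGetGuestCR0(pVM);
 *      CPUMSetGuestCR0(pVM, cr0New);                   // cr0New: hypothetical value
 *      if ((cr0Old ^ cr0New) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
 *          ;   // notify PGM about the paging mode change here
 */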

CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
{
    pVM->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
{
    pVM->cpum.s.Guest.cr3 = cr3;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
{
    if (    (cr4                   & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        !=  (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVM))
        cr4 &= ~X86_CR4_OSFSXR;
    pVM->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
{
    pVM->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
{
    pVM->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
{
    pVM->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
{
    pVM->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
{
    pVM->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
{
    pVM->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
{
    pVM->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
{
    pVM->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
{
    pVM->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
{
    pVM->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
{
    pVM->cpum.s.Guest.ss = ss;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
{
    pVM->cpum.s.Guest.cs = cs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
{
    pVM->cpum.s.Guest.ds = ds;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
{
    pVM->cpum.s.Guest.es = es;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
{
    pVM->cpum.s.Guest.fs = fs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
{
    pVM->cpum.s.Guest.gs = gs;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
{
    pVM->cpum.s.Guest.msrEFER = val;
}

CPUMDECL(uint64_t) CPUMGetGuestMsr(PVM pVM, unsigned idMsr)
{
    uint64_t val = 0;

    switch (idMsr)
    {
        case MSR_IA32_CR_PAT:
            val = pVM->cpum.s.Guest.msrPAT;
            break;

        case MSR_IA32_SYSENTER_CS:
            val = pVM->cpum.s.Guest.SysEnter.cs;
            break;

        case MSR_IA32_SYSENTER_EIP:
            val = pVM->cpum.s.Guest.SysEnter.eip;
            break;

        case MSR_IA32_SYSENTER_ESP:
            val = pVM->cpum.s.Guest.SysEnter.esp;
            break;

        case MSR_K6_EFER:
            val = pVM->cpum.s.Guest.msrEFER;
            break;

        case MSR_K8_SF_MASK:
            val = pVM->cpum.s.Guest.msrSFMASK;
            break;

        case MSR_K6_STAR:
            val = pVM->cpum.s.Guest.msrSTAR;
            break;

        case MSR_K8_LSTAR:
            val = pVM->cpum.s.Guest.msrLSTAR;
            break;

        case MSR_K8_CSTAR:
            val = pVM->cpum.s.Guest.msrCSTAR;
            break;

        case MSR_K8_KERNEL_GS_BASE:
            val = pVM->cpum.s.Guest.msrKERNELGSBASE;
            break;

        /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
        default:
            AssertFailed();
            break;
    }
    return val;
}
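
/* Illustrative usage sketch (assumption, not from the original file): a RDMSR
 * emulation path would split the 64-bit value into EDX:EAX like this:
 *
 *      uint64_t const uValue = CPUMGetGuestMsr(pVM, MSR_IA32_SYSENTER_EIP);
 *      CPUMSetGuestEAX(pVM, (uint32_t)uValue);
 *      CPUMSetGuestEDX(pVM, (uint32_t)(uValue >> 32));
 */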

CPUMDECL(RTGCPTR) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
    return pVM->cpum.s.Guest.idtr.pIdt;
}

CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
{
    return pVM->cpum.s.Guest.tr;
}

CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
{
    return pVM->cpum.s.Guest.cs;
}

CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
{
    return pVM->cpum.s.Guest.ds;
}

CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
{
    return pVM->cpum.s.Guest.es;
}

CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
{
    return pVM->cpum.s.Guest.fs;
}

CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
{
    return pVM->cpum.s.Guest.gs;
}

CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
{
    return pVM->cpum.s.Guest.ss;
}

CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
{
    return pVM->cpum.s.Guest.ldtr;
}

CPUMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
{
    return pVM->cpum.s.Guest.cr0;
}

CPUMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
{
    return pVM->cpum.s.Guest.cr2;
}

CPUMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
{
    return pVM->cpum.s.Guest.cr3;
}

CPUMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
{
    return pVM->cpum.s.Guest.cr4;
}

CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
{
    *pGDTR = pVM->cpum.s.Guest.gdtr;
}

CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
{
    return pVM->cpum.s.Guest.eip;
}

CPUMDECL(uint64_t) CPUMGetGuestRIP(PVM pVM)
{
    return pVM->cpum.s.Guest.rip;
}

CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
{
    return pVM->cpum.s.Guest.eax;
}

CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
{
    return pVM->cpum.s.Guest.ebx;
}

CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
{
    return pVM->cpum.s.Guest.ecx;
}

CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
{
    return pVM->cpum.s.Guest.edx;
}

CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
{
    return pVM->cpum.s.Guest.esi;
}

CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
{
    return pVM->cpum.s.Guest.edi;
}

CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
{
    return pVM->cpum.s.Guest.esp;
}

CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
{
    return pVM->cpum.s.Guest.ebp;
}

CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
{
    return pVM->cpum.s.Guest.eflags.u32;
}

CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
{
    return &pVM->cpum.s.Guest.trHid;
}

/** @todo crx should be an array */
CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_CR0:
            *pValue = pVM->cpum.s.Guest.cr0;
            break;
        case USE_REG_CR2:
            *pValue = pVM->cpum.s.Guest.cr2;
            break;
        case USE_REG_CR3:
            *pValue = pVM->cpum.s.Guest.cr3;
            break;
        case USE_REG_CR4:
            *pValue = pVM->cpum.s.Guest.cr4;
            break;
        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

CPUMDECL(uint64_t) CPUMGetGuestDR0(PVM pVM)
{
    return pVM->cpum.s.Guest.dr0;
}

CPUMDECL(uint64_t) CPUMGetGuestDR1(PVM pVM)
{
    return pVM->cpum.s.Guest.dr1;
}

CPUMDECL(uint64_t) CPUMGetGuestDR2(PVM pVM)
{
    return pVM->cpum.s.Guest.dr2;
}

CPUMDECL(uint64_t) CPUMGetGuestDR3(PVM pVM)
{
    return pVM->cpum.s.Guest.dr3;
}

CPUMDECL(uint64_t) CPUMGetGuestDR6(PVM pVM)
{
    return pVM->cpum.s.Guest.dr6;
}

CPUMDECL(uint64_t) CPUMGetGuestDR7(PVM pVM)
{
    return pVM->cpum.s.Guest.dr7;
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            *pValue = pVM->cpum.s.Guest.dr0;
            break;
        case USE_REG_DR1:
            *pValue = pVM->cpum.s.Guest.dr1;
            break;
        case USE_REG_DR2:
            *pValue = pVM->cpum.s.Guest.dr2;
            break;
        case USE_REG_DR3:
            *pValue = pVM->cpum.s.Guest.dr3;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            *pValue = pVM->cpum.s.Guest.dr6;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            *pValue = pVM->cpum.s.Guest.dr7;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

CPUMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
{
    return pVM->cpum.s.Guest.msrEFER;
}

/**
 * Gets a CPUID leaf.
 *
 * @param   pVM     The VM handle.
 * @param   iLeaf   The CPUID leaf to get.
 * @param   pEax    Where to store the EAX value.
 * @param   pEbx    Where to store the EBX value.
 * @param   pEcx    Where to store the ECX value.
 * @param   pEdx    Where to store the EDX value.
 */
CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    PCCPUMCPUID pCpuId;
    if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
    else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
        pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
    else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
        pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
    else
        pCpuId = &pVM->cpum.s.GuestCpuIdDef;

    *pEax = pCpuId->eax;
    *pEbx = pCpuId->ebx;
    *pEcx = pCpuId->ecx;
    *pEdx = pCpuId->edx;
    Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}
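
/* Illustrative usage sketch (assumption, not from the original file): emulating
 * a guest CPUID instruction with the leaf taken from the guest EAX register:
 *
 *      uint32_t uEax, uEbx, uEcx, uEdx;
 *      CPUMGetGuestCpuId(pVM, CPUMGetGuestEAX(pVM), &uEax, &uEbx, &uEcx, &uEdx);
 *      CPUMSetGuestEAX(pVM, uEax);
 *      CPUMSetGuestEBX(pVM, uEbx);
 *      CPUMSetGuestECX(pVM, uEcx);
 *      CPUMSetGuestEDX(pVM, uEdx);
 */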

/**
 * Gets a pointer to the array of standard CPUID leafs.
 *
 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
 *
 * @returns Pointer to the standard CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
}

/**
 * Gets a pointer to the array of extended CPUID leafs.
 *
 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
 *
 * @returns Pointer to the extended CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
}

/**
 * Gets a pointer to the array of centaur CPUID leafs.
 *
 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
 *
 * @returns Pointer to the centaur CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
}

/**
 * Gets a pointer to the default CPUID leaf.
 *
 * @returns Pointer to the default CPUID leaf (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
}

/**
 * Gets the number of standard CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}

/**
 * Gets the number of extended CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}

/**
 * Gets the number of centaur CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}

/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to set.
 */
CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        /*
         * Set the sysenter/sysexit bit in the standard feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SEP:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
            {
                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
            break;
        }

        /*
         * Set the syscall/sysret bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SYSCALL:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
            {
                LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
                return;
            }
            /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
            break;
        }

        /*
         * Set the PAE bit in both feature masks.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_PAE:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
            {
                LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
            break;
        }

        /*
         * Set the LONG MODE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
            break;
        }

        /*
         * Set the NXE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_NXE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
            {
                LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
            break;
        }

        case CPUMCPUIDFEATURE_LAHF:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
            {
                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
                return;
            }

            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
            break;
        }

        case CPUMCPUIDFEATURE_PAT:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}
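
/* Illustrative usage sketch (assumption, not from the original file): VM setup
 * code would typically mirror host capabilities and configuration into the
 * guest CPUID like this:
 *
 *      if (fEnableNX)                                  // fEnableNX: hypothetical config flag
 *          CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
 *      else
 *          CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
 */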

/**
 * Queries a CPUID feature bit.
 *
 * @returns boolean for feature presence
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to query.
 */
CPUMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    return false;
}

/**
 * Clears a CPUID feature bit.
 *
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to clear.
 */
CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Clear the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
            Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
            break;

        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
            break;
        }

        case CPUMCPUIDFEATURE_PAT:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}

/**
 * Gets the CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
{
    return pVM->cpum.s.enmCPUVendor;
}


CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, uint64_t uDr0)
{
    pVM->cpum.s.Guest.dr0 = uDr0;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, uint64_t uDr1)
{
    pVM->cpum.s.Guest.dr1 = uDr1;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, uint64_t uDr2)
{
    pVM->cpum.s.Guest.dr2 = uDr2;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, uint64_t uDr3)
{
    pVM->cpum.s.Guest.dr3 = uDr3;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, uint64_t uDr6)
{
    pVM->cpum.s.Guest.dr6 = uDr6;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, uint64_t uDr7)
{
    pVM->cpum.s.Guest.dr7 = uDr7;
    return CPUMRecalcHyperDRx(pVM);
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint64_t Value)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            pVM->cpum.s.Guest.dr0 = Value;
            break;
        case USE_REG_DR1:
            pVM->cpum.s.Guest.dr1 = Value;
            break;
        case USE_REG_DR2:
            pVM->cpum.s.Guest.dr2 = Value;
            break;
        case USE_REG_DR3:
            pVM->cpum.s.Guest.dr3 = Value;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            pVM->cpum.s.Guest.dr6 = Value;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            pVM->cpum.s.Guest.dr7 = Value;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return CPUMRecalcHyperDRx(pVM);
}


/**
 * Recalculates the hypervisor DRx register values based on
 * current guest registers and DBGF breakpoints.
 *
 * This is called whenever a guest DRx register is modified and when DBGF
 * sets a hardware breakpoint. In guest context this function will reload
 * any (hyper) DRx registers that come out with a different value.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
{
    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags. The GE and LE flags are always
     * set and we don't care if the guest doesn't set them. GD is virtualized
     * when we dispatch #DB; we never enable it.
     */
    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
#ifdef CPUM_VIRTUALIZE_DRX
    const RTGCUINTREG uGstDr7  = CPUMGetGuestDR7(pVM);
#else
    const RTGCUINTREG uGstDr7  = 0;
#endif
    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
    {
        /*
         * Ok, something is enabled. Recalc each of the breakpoints.
         * Straightforward code, not optimized/minimized in any way.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = CPUMGetGuestDR0(pVM);
        }
        else
            uNewDr0 = pVM->cpum.s.Hyper.dr0;

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = CPUMGetGuestDR1(pVM);
        }
        else
            uNewDr1 = pVM->cpum.s.Hyper.dr1;

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = CPUMGetGuestDR2(pVM);
        }
        else
            uNewDr2 = pVM->cpum.s.Hyper.dr2;

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = CPUMGetGuestDR3(pVM);
        }
        else
            uNewDr3 = pVM->cpum.s.Hyper.dr3;

        /*
         * Apply the updates.
         */
#ifdef IN_GC
        if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
        {
            /** @todo save host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
        if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
            CPUMSetHyperDR3(pVM, uNewDr3);
        if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
            CPUMSetHyperDR2(pVM, uNewDr2);
        if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
            CPUMSetHyperDR1(pVM, uNewDr1);
        if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
            CPUMSetHyperDR0(pVM, uNewDr0);
        if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
            CPUMSetHyperDR7(pVM, uNewDr7);
    }
    else
    {
#ifdef IN_GC
        if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
        {
            /** @todo restore host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
          pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
          pVM->cpum.s.Hyper.dr7));

    return VINF_SUCCESS;
}
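
/* Illustrative note (assumption, not from the original file): both the guest
 * DRx setters above and DBGF hardware breakpoints funnel into this function,
 * so a single call keeps the merged hyper DR7 consistent:
 *
 *      CPUMSetGuestDR0(pVM, GCPtrBp);                  // GCPtrBp: hypothetical address;
 *                                                      // recalcs the hyper DRx internally
 */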

#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */

/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
 *
 * @returns VBox status code (recompiler failure).
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    Assert(!pVM->cpum.s.fRawEntered);
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);

    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Enter execution mode.
         */
        PATMRawEnter(pVM, pCtxCore);

        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss |= 1;
        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
            pCtxCore->cs |= 1;
    }
    else
    {
        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                  ("ring-1 code not supported\n"));
        /*
         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
         */
        PATMRawEnter(pVM, pCtxCore);
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
                     || pCtxCore->eflags.Bits.u1VM,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
    Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */

    pVM->cpum.s.fRawEntered = true;
    return VINF_SUCCESS;
}


/**
 * Transforms the guest CPU state from raw-ring mode to correct values.
 *
 * This function will change any selector registers with DPL=1 to DPL=0.
 *
 * @returns Adjusted rc.
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @param   rc          Raw mode return code.
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
{
    /*
     * Don't leave if we've already left (in GC).
     */
    Assert(pVM->cpum.s.fRawEntered);
    if (!pVM->cpum.s.fRawEntered)
        return rc;
    pVM->cpum.s.fRawEntered = false;

    PCPUMCTX pCtx = &pVM->cpum.s.Guest;
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(pCtx);
    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));

    /*
     * Are we executing in raw ring-1?
     */
    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Leave execution mode.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /* Not quite sure if this is really required, but it shouldn't harm (too much anyway). */
        /** @todo See what happens if we remove this. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;

        /*
         * Ring-1 selector => Ring-0.
         */
        pCtxCore->ss &= ~X86_SEL_RPL;
        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
            pCtxCore->cs &= ~X86_SEL_RPL;
    }
    else
    {
        /*
         * PATM is taking care of the IOPL and IF flags for us.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        if (!pCtxCore->eflags.Bits.u1VM)
        {
            /** @todo See what happens if we remove this. */
            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
                pCtxCore->ds &= ~X86_SEL_RPL;
            if ((pCtxCore->es & X86_SEL_RPL) == 1)
                pCtxCore->es &= ~X86_SEL_RPL;
            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
                pCtxCore->fs &= ~X86_SEL_RPL;
            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
                pCtxCore->gs &= ~X86_SEL_RPL;
        }
    }

    return rc;
}
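
/* Illustrative pairing (assumption, not from the original file): the raw-mode
 * execution loop brackets each run of guest code with enter/leave, adjusting
 * the return code on the way out:
 *
 *      CPUMRawEnter(pVM, NULL);
 *      int rc = RunRawModeGuestCode(pVM);              // RunRawModeGuestCode: hypothetical executor
 *      rc = CPUMRawLeave(pVM, NULL, rc);
 */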

/**
 * Updates the EFLAGS while we're in raw-mode.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 * @param   eflags      The new EFLAGS value.
 */
CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
{
    if (!pVM->cpum.s.fRawEntered)
    {
        pCtxCore->eflags.u32 = eflags;
        return;
    }
    PATMRawSetEFlags(pVM, pCtxCore, eflags);
}

#endif /* !IN_RING0 */

/**
 * Gets the EFLAGS while we're in raw-mode.
 *
 * @returns The eflags.
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 */
CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
{
#ifdef IN_RING0
    return pCtxCore->eflags.u32;
#else
    if (!pVM->cpum.s.fRawEntered)
        return pCtxCore->eflags.u32;
    return PATMRawGetEFlags(pVM, pCtxCore);
#endif
}


/**
 * Gets and resets the changed flags (CPUM_CHANGED_*).
 * Only REM should call this function.
 *
 * @returns The changed flags.
 * @param   pVM     The VM handle.
 */
CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
{
    unsigned fFlags = pVM->cpum.s.fChanged;
    pVM->cpum.s.fChanged = 0;
    /** @todo change the switcher to use the fChanged flags. */
    if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
    {
        fFlags |= CPUM_CHANGED_FPU_REM;
        pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
    }
    return fFlags;
}

/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVM             The VM handle.
 * @param   fChangedFlags   The changed flags to set.
 */
CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
{
    pVM->cpum.s.fChanged |= fChangedFlags;
}

/**
 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
 * @returns true if supported.
 * @returns false if not supported.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
{
    return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
}


/**
 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
}


/**
 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
}


#ifndef IN_RING3
/**
 * Lazily syncs in the FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
{
    return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
}


/**
 * Restores the host FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
}
#endif /* !IN_RING3 */


/**
 * Checks if we activated the FPU/XMM state of the guest OS.
 * @returns true if we did.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
}


/**
 * Deactivates the FPU/XMM state of the guest OS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
{
    pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
}


/**
 * Checks if the hidden selector registers are valid.
 * @returns true if they are.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
{
    return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
}


/**
 * Sets whether the hidden selector registers are valid.
 * @param   pVM     The VM handle.
 * @param   fValid  Valid or not.
 */
CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
{
    pVM->cpum.s.fValidHiddenSelRegs = fValid;
}


/**
 * Get the current privilege level of the guest.
 *
 * @returns The CPL.
 * @param   pVM         VM Handle.
 * @param   pCtxCore    The context core (for trap usage).
 */
CPUMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    uint32_t cpl;

    if (CPUMAreHiddenSelRegsValid(pVM))
    {
        /*
         * The hidden CS.DPL register is always equal to the CPL, it is
         * not affected by loading a conforming code segment.
         *
         * This only seems to apply to AMD-V; in the VT-x case we *do* need to
         * look at SS. (ACP2 regression during install after a far call to ring 2)
         */
        cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
    }
    else if (RT_LIKELY(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
    {
        if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
        {
            /*
             * The SS RPL is always equal to the CPL, while the CS RPL
             * isn't necessarily equal if the segment is conforming.
             * See section 4.11.1 in the AMD manual.
             */
            cpl = (pCtxCore->ss & X86_SEL_RPL);
#ifndef IN_RING0
            if (cpl == 1)
                cpl = 0;
#endif
        }
        else
            cpl = 3;
    }
    else
        cpl = 0;        /* real mode; cpl is zero */

    return cpl;
}
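
/* Illustrative usage sketch (assumption, not from the original file):
 * instruction emulation guards privileged operations on the CPL, e.g.:
 *
 *      if (CPUMGetGuestCPL(pVM, pRegFrame) != 0)
 *          return VERR_ACCESS_DENIED;                  // status code chosen for the example
 */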


/**
 * Gets the current guest CPU mode.
 *
 * If paging mode is what you need, check out PGMGetGuestMode().
 *
 * @returns The CPU mode.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
{
    CPUMMODE enmMode;
    if (!(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
        enmMode = CPUMMODE_REAL;
    else if (!(pVM->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        enmMode = CPUMMODE_PROTECTED;
    else
        enmMode = CPUMMODE_LONG;

    return enmMode;
}
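
/* Illustrative usage sketch (assumption, not from the original file): callers
 * that need to special-case 64-bit guests can branch on the returned mode:
 *
 *      if (CPUMGetGuestMode(pVM) == CPUMMODE_LONG)
 *          fGuest64Bit = true;                         // fGuest64Bit: hypothetical flag
 */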