VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @ 23794

Last change on this file since 23794 was 23794, checked in by vboxsync, 15 years ago:

More synthetic cpu work
/* $Id: CPUMAllRegs.cpp 23794 2009-10-15 11:50:03Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include <VBox/patm.h>
#include <VBox/dbgf.h>
#include <VBox/mm.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <VBox/hwaccm.h>
#include <VBox/tm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG)
# pragma optimize("y", off)
#endif

/**
 * Sets or resets an alternative hypervisor context core.
 *
 * This is called when we get a hypervisor trap, to switch the context
 * core to the trap frame on the stack. It is called again to reset
 * back to the default context core when resuming hypervisor execution.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    Pointer to the alternative context core or NULL
 *                      to go back to the default context core.
 */
VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.pHyperCoreR3, pVCpu->cpum.s.pHyperCoreR0, pVCpu->cpum.s.pHyperCoreRC, pCtxCore));
    if (!pCtxCore)
    {
        pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
        pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
        pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
        pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
    }
    else
    {
        pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
        pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
        pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
    }
}

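/*
 * Illustrative only -- not part of the original file: a minimal sketch of how
 * a trap handler might use CPUMHyperSetCtxCore(). The handler name and the
 * trap-frame variable are hypothetical; the CPUM calls are the ones in this
 * file.
 */
#if 0 /* example sketch */
static void exampleHyperTrapHandler(PVMCPU pVCpu, PCPUMCTXCORE pTrapFrame)
{
    /* Make the trap frame the active hypervisor context core... */
    CPUMHyperSetCtxCore(pVCpu, pTrapFrame);

    /* ...inspect or adjust registers through the getters/setters below... */
    uint32_t uEip = CPUMGetHyperEIP(pVCpu);
    NOREF(uEip);

    /* ...and restore the default context core before resuming. */
    CPUMHyperSetCtxCore(pVCpu, NULL);
}
#endif
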

/**
 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVCpu       The VMCPU handle.
 */
VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
}


/**
 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   ppCtx       Receives the hyper CPUMCTX pointer when successful.
 *
 * @deprecated  This has never given (and never will give) the right picture of
 *              the hypervisor register state. With CPUMHyperSetCtxCore() this
 *              is getting much worse. So, use the individual functions for
 *              getting and esp. setting the hypervisor registers.
 */
VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVCpu->cpum.s.Hyper;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
    pVCpu->cpum.s.Hyper.gdtrPadding = 0;
}


VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
    pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
    pVCpu->cpum.s.Hyper.idtrPadding = 0;
}


VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
{
    pVCpu->cpum.s.Hyper.cr3 = cr3;

#ifdef IN_RC
    /* Update the current CR3. */
    ASMSetCR3(cr3);
#endif
}

VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.cr3;
}


VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
}


VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
}


VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
}


VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
}


VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
}


VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
}


VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
}


VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
{
    pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
}


VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
{
    pVCpu->cpum.s.Hyper.tr = SelTR;
}


VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
{
    pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
}


VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
{
    pVCpu->cpum.s.Hyper.dr[0] = uDr0;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
{
    pVCpu->cpum.s.Hyper.dr[1] = uDr1;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
{
    pVCpu->cpum.s.Hyper.dr[2] = uDr2;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
{
    pVCpu->cpum.s.Hyper.dr[3] = uDr3;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
{
    pVCpu->cpum.s.Hyper.dr[6] = uDr6;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
{
    pVCpu->cpum.s.Hyper.dr[7] = uDr7;
    /** @todo in GC we must load it! */
}


VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
}


VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
}


VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
}


VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
}


VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
}


VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
}


VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
}


VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
}


VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
}


VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
}


VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
}


VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
}


VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
}


VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
}


VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
}


VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
}


VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
}


VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
    return pVCpu->cpum.s.Hyper.idtr.pIdt;
}


VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
    return pVCpu->cpum.s.Hyper.gdtr.pGdt;
}


VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ldtr;
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[0];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[1];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[2];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[3];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[6];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[7];
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVCpu       Handle to the virtual cpu.
 */
VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
{
    return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
}


/**
 * Sets the guest context core registers.
 *
 * @param   pVCpu       Handle to the virtual cpu.
 * @param   pCtxCore    The new context core values.
 */
VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
{
    /** @todo #1410 requires selectors to be checked. (huh? 1410?) */

    PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
    *pCtxCoreDst = *pCtxCore;

    /* Mask away invalid parts of the cpu context. */
    if (!CPUMIsGuestInLongMode(pVCpu))
    {
        uint64_t u64Mask = UINT64_C(0xffffffff);

        pCtxCoreDst->rip        &= u64Mask;
        pCtxCoreDst->rax        &= u64Mask;
        pCtxCoreDst->rbx        &= u64Mask;
        pCtxCoreDst->rcx        &= u64Mask;
        pCtxCoreDst->rdx        &= u64Mask;
        pCtxCoreDst->rsi        &= u64Mask;
        pCtxCoreDst->rdi        &= u64Mask;
        pCtxCoreDst->rbp        &= u64Mask;
        pCtxCoreDst->rsp        &= u64Mask;
        pCtxCoreDst->rflags.u   &= u64Mask;

        pCtxCoreDst->r8         = 0;
        pCtxCoreDst->r9         = 0;
        pCtxCoreDst->r10        = 0;
        pCtxCoreDst->r11        = 0;
        pCtxCoreDst->r12        = 0;
        pCtxCoreDst->r13        = 0;
        pCtxCoreDst->r14        = 0;
        pCtxCoreDst->r15        = 0;
    }
}

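/*
 * Illustrative only -- not part of the original file: the masking above means
 * that outside long mode only the low 32 bits of RIP and the GPRs survive a
 * CPUMSetGuestCtxCore() call, and R8-R15 read back as zero.
 */
#if 0 /* example sketch */
static void exampleSetCore32(PVMCPU pVCpu, PCCPUMCTXCORE pNewCore)
{
    CPUMSetGuestCtxCore(pVCpu, pNewCore);
    if (!CPUMIsGuestInLongMode(pVCpu))
        Assert(CPUMGetGuestRIP(pVCpu) <= UINT32_MAX); /* upper half masked away */
}
#endif
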

/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns The CPUMCTX pointer.
 * @param   pVCpu       Handle to the virtual cpu.
 */
VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.Guest;
}

VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
    pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS;
}

VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
    pVCpu->cpum.s.Guest.idtr.pIdt = addr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS;
}

VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
{
    AssertMsgFailed(("Need to load the hidden bits too!\n"));

    pVCpu->cpum.s.Guest.tr = tr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS;
}

VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
{
    pVCpu->cpum.s.Guest.ldtr = ldtr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS;
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVCpu   Handle to the virtual cpu.
 * @param   cr0     The new CR0 value.
 */
VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
{
#ifdef IN_RC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0                     & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        !=  (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MP are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
# ifdef VBOX_STRICT
            else
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
# endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(   (HyperCR0                  & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      == (pVCpu->cpum.s.Guest.cr0   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif /* IN_RC */

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0                     & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        !=  (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}

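/*
 * Illustrative only -- not part of the original file: a minimal sketch of an
 * emulated MOV-to-CR0 path with a hypothetical helper name. It shows the
 * caller contract documented above: CPUMSetGuestCR0 only records the change;
 * the caller must let PGM react to PG/WP/PE changes itself.
 */
#if 0 /* example sketch */
static int exampleEmulateMovToCr0(PVMCPU pVCpu, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    int rc = CPUMSetGuestCR0(pVCpu, uNewCr0);
    AssertRC(rc);
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
    {
        /* ...notify PGM here, per the "caller is responsible for calling
           PGM" note above... */
    }
    return rc;
}
#endif
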

VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
{
    pVCpu->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
{
    pVCpu->cpum.s.Guest.cr3 = cr3;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
{
    if (    (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        !=  (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
        cr4 &= ~X86_CR4_OSFSXR;
    pVCpu->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
{
    pVCpu->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
{
    pVCpu->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
{
    pVCpu->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
{
    pVCpu->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
{
    pVCpu->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
{
    pVCpu->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
{
    pVCpu->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
{
    pVCpu->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
{
    pVCpu->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
{
    pVCpu->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
{
    pVCpu->cpum.s.Guest.ss = ss;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
{
    pVCpu->cpum.s.Guest.cs = cs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
{
    pVCpu->cpum.s.Guest.ds = ds;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
{
    pVCpu->cpum.s.Guest.es = es;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
{
    pVCpu->cpum.s.Guest.fs = fs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
{
    pVCpu->cpum.s.Guest.gs = gs;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
{
    pVCpu->cpum.s.Guest.msrEFER = val;
}

VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
{
    uint64_t u64 = 0;

    switch (idMsr)
    {
        case MSR_IA32_TSC:
            u64 = TMCpuTickGet(pVCpu);
            break;

        case MSR_IA32_CR_PAT:
            u64 = pVCpu->cpum.s.Guest.msrPAT;
            break;

        case MSR_IA32_SYSENTER_CS:
            u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
            break;

        case MSR_IA32_SYSENTER_EIP:
            u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
            break;

        case MSR_IA32_SYSENTER_ESP:
            u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
            break;

        case MSR_K6_EFER:
            u64 = pVCpu->cpum.s.Guest.msrEFER;
            break;

        case MSR_K8_SF_MASK:
            u64 = pVCpu->cpum.s.Guest.msrSFMASK;
            break;

        case MSR_K6_STAR:
            u64 = pVCpu->cpum.s.Guest.msrSTAR;
            break;

        case MSR_K8_LSTAR:
            u64 = pVCpu->cpum.s.Guest.msrLSTAR;
            break;

        case MSR_K8_CSTAR:
            u64 = pVCpu->cpum.s.Guest.msrCSTAR;
            break;

        case MSR_K8_KERNEL_GS_BASE:
            u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
            break;

        case MSR_K8_TSC_AUX:
            u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
            break;

        /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
        default:
            AssertFailed();
            break;
    }
    return u64;
}

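/*
 * Illustrative only -- not part of the original file: reading the virtual TSC
 * through the MSR getter above; equivalent to calling TMCpuTickGet directly.
 */
#if 0 /* example sketch */
static uint64_t exampleReadGuestTsc(PVMCPU pVCpu)
{
    return CPUMGetGuestMsr(pVCpu, MSR_IA32_TSC); /* dispatches to TMCpuTickGet */
}
#endif
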
VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
{
    /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
    switch (idMsr)
    {
        case MSR_K8_TSC_AUX:
            pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
            break;

        default:
            AssertFailed();
            break;
    }
}

VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
    return pVCpu->cpum.s.Guest.idtr.pIdt;
}


VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
{
    if (pHidden)
        *pHidden = pVCpu->cpum.s.Guest.trHid;
    return pVCpu->cpum.s.Guest.tr;
}


VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cs;
}


VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ds;
}


VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.es;
}


VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.fs;
}


VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.gs;
}


VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ss;
}


VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ldtr;
}


VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr0;
}


VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr2;
}


VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr3;
}


VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr4;
}


VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
{
    *pGDTR = pVCpu->cpum.s.Guest.gdtr;
}


VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eip;
}


VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.rip;
}


VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eax;
}


VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ebx;
}


VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ecx;
}


VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.edx;
}


VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.esi;
}


VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.edi;
}


VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.esp;
}


VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ebp;
}


VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eflags.u32;
}


/// @todo crx should be an array
VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_CR0:
            *pValue = pVCpu->cpum.s.Guest.cr0;
            break;
        case USE_REG_CR2:
            *pValue = pVCpu->cpum.s.Guest.cr2;
            break;
        case USE_REG_CR3:
            *pValue = pVCpu->cpum.s.Guest.cr3;
            break;
        case USE_REG_CR4:
            *pValue = pVCpu->cpum.s.Guest.cr4;
            break;
        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}


VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[0];
}


VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[1];
}


VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[2];
}


VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[3];
}


VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[6];
}


VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[7];
}


VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
{
    AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    *pValue = pVCpu->cpum.s.Guest.dr[iReg];
    return VINF_SUCCESS;
}

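/*
 * Illustrative only -- not part of the original file: the DR4/DR5 aliasing
 * implemented above, i.e. asking for DR4 or DR5 returns DR6 or DR7.
 */
#if 0 /* example sketch */
static void exampleDrAlias(PVMCPU pVCpu)
{
    uint64_t uDr4;
    CPUMGetGuestDRx(pVCpu, 4, &uDr4);       /* DR4 aliases DR6 */
    Assert(uDr4 == CPUMGetGuestDR6(pVCpu));
}
#endif
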

VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.msrEFER;
}


/**
 * Gets a CpuId leaf.
 *
 * @param   pVCpu   The VMCPU handle.
 * @param   iLeaf   The CPUID leaf to get.
 * @param   pEax    Where to store the EAX value.
 * @param   pEbx    Where to store the EBX value.
 * @param   pEcx    Where to store the ECX value.
 * @param   pEdx    Where to store the EDX value.
 */
VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    PCCPUMCPUID pCpuId;
    if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
    else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
        pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
    else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
        pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
    else
        pCpuId = &pVM->cpum.s.GuestCpuIdDef;

    bool fHasMoreCaches = (*pEcx == 0);

    *pEax = pCpuId->eax;
    *pEbx = pCpuId->ebx;
    *pEcx = pCpuId->ecx;
    *pEdx = pCpuId->edx;

    if (    iLeaf == 1
        &&  pVM->cCpus > 1)
    {
        /* Bits 31-24: Initial APIC ID */
        Assert(pVCpu->idCpu <= 255);
        *pEbx |= (pVCpu->idCpu << 24);
    }

    if (    iLeaf == 4
        &&  fHasMoreCaches
        &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        /* Report unified L0 cache; Linux's num_cpu_cores() requires
         * that to be non-0 to detect the core count correctly. */
        *pEax |= (1 << 5) | 3;
    }

    Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}

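/*
 * Illustrative only -- not part of the original file: querying leaf 1 and
 * pulling the initial APIC ID out of EBX[31:24], which the code above patches
 * in on SMP guests. Note that *pEcx is read as the subleaf input before it is
 * overwritten, so it must be initialised by the caller.
 */
#if 0 /* example sketch */
static uint8_t exampleGetInitialApicId(PVMCPU pVCpu)
{
    uint32_t uEax, uEbx, uEcx = 0, uEdx; /* uEcx = 0: subleaf 0 */
    CPUMGetGuestCpuId(pVCpu, 1, &uEax, &uEbx, &uEcx, &uEdx);
    return (uint8_t)(uEbx >> 24);
}
#endif
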
/**
 * Gets the number of standard CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}


/**
 * Gets the number of extended CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}


/**
 * Gets the number of centaur CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}


/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to set.
 */
VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        /*
         * Set the x2APIC bit in the standard feature mask.
         */
        case CPUMCPUIDFEATURE_X2APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
            break;

        /*
         * Set the sysenter/sysexit bit in the standard feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SEP:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
            {
                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
            break;
        }

        /*
         * Set the syscall/sysret bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SYSCALL:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
            {
#if HC_ARCH_BITS == 32
                /* X86_CPUID_AMD_FEATURE_EDX_SEP is, it seems, not set in
                 * 32-bit mode, even when the cpu is capable of doing so in
                 * 64-bit mode.
                 */
                if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                    ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
                    ||  !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
#endif
                {
                    LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
                    return;
                }
            }
            /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
            break;
        }

        /*
         * Set the PAE bit in both feature masks.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_PAE:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
            {
                LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
            break;
        }

        /*
         * Set the LONG MODE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
            break;
        }

        /*
         * Set the NXE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_NXE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
            {
                LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
            break;
        }

        case CPUMCPUIDFEATURE_LAHF:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
            {
                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
                return;
            }

            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
            break;
        }

        case CPUMCPUIDFEATURE_PAT:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
            break;
        }

        case CPUMCPUIDFEATURE_RDTSCP:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
            {
                LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for AMD only (for now). */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
    }
}

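/*
 * Illustrative only -- not part of the original file: how VM setup code might
 * turn CPUID feature bits on or off for the guest. Both functions are defined
 * in this file; the helper name is hypothetical.
 */
#if 0 /* example sketch */
static void exampleConfigureGuestCpuid(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);      /* expose PAE */
    CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC); /* hide x2APIC */
}
#endif
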

/**
 * Queries a CPUID feature bit.
 *
 * @returns boolean for feature presence
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to query.
 */
VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        case CPUMCPUIDFEATURE_RDTSCP:
        {
            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
            break;
        }

        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    return false;
}


/**
 * Clears a CPUID feature bit.
 *
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to clear.
 */
VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Clear the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
            Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
            break;

        /*
         * Clear the x2APIC bit in the standard feature mask.
         */
        case CPUMCPUIDFEATURE_X2APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
            break;

        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
            break;
        }

        case CPUMCPUIDFEATURE_PAT:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
            break;
        }

        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
            break;
        }

        case CPUMCPUIDFEATURE_LAHF:
        {
            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
                pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
    }
}


/**
 * Gets the host CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The VM handle.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
{
    return pVM->cpum.s.enmHostCpuVendor;
}

/**
 * Gets the guest CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The VM handle.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
{
    return pVM->cpum.s.enmGuestCpuVendor;
}


VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
{
    pVCpu->cpum.s.Guest.dr[0] = uDr0;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
{
    pVCpu->cpum.s.Guest.dr[1] = uDr1;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
{
    pVCpu->cpum.s.Guest.dr[2] = uDr2;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
{
    pVCpu->cpum.s.Guest.dr[3] = uDr3;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
{
    pVCpu->cpum.s.Guest.dr[6] = uDr6;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
{
    pVCpu->cpum.s.Guest.dr[7] = uDr7;
    return CPUMRecalcHyperDRx(pVCpu);
}


VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
{
    AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    pVCpu->cpum.s.Guest.dr[iReg] = Value;
    return CPUMRecalcHyperDRx(pVCpu);
}


/**
 * Recalculates the hypervisor DRx register values based on
 * current guest registers and DBGF breakpoints.
 *
 * This is called whenever a guest DRx register is modified and when DBGF
 * sets a hardware breakpoint. In guest context this function will reload
 * any (hyper) DRx registers which come out with a different value.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags. The GE and LE flags are always
     * set and we don't care if the guest doesn't set them. GD is virtualized
     * when we dispatch #DB, we never enable it.
     */
    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
#ifdef CPUM_VIRTUALIZE_DRX
    const RTGCUINTREG uGstDr7  = CPUMGetGuestDR7(pVCpu);
#else
    const RTGCUINTREG uGstDr7  = 0;
#endif
    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
    {
        /*
         * Ok, something is enabled. Recalc each of the breakpoints.
         * Straightforward code, not optimized/minimized in any way.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = CPUMGetGuestDR0(pVCpu);
        }
        else
            uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = CPUMGetGuestDR1(pVCpu);
        }
        else
            uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = CPUMGetGuestDR2(pVCpu);
        }
        else
            uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = CPUMGetGuestDR3(pVCpu);
        }
        else
            uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];

        /*
         * Apply the updates.
         */
#ifdef IN_RC
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
        {
            /** @todo save host DBx registers. */
        }
#endif
        pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
        if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
            CPUMSetHyperDR3(pVCpu, uNewDr3);
        if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
            CPUMSetHyperDR2(pVCpu, uNewDr2);
        if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
            CPUMSetHyperDR1(pVCpu, uNewDr1);
        if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
            CPUMSetHyperDR0(pVCpu, uNewDr0);
        if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
            CPUMSetHyperDR7(pVCpu, uNewDr7);
    }
    else
    {
#ifdef IN_RC
        if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
        {
            /** @todo restore host DBx registers. */
        }
#endif
        pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
          pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
          pVCpu->cpum.s.Hyper.dr[7]));

    return VINF_SUCCESS;
}

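/*
 * Illustrative only -- not part of the original file: the per-slot precedence
 * implemented above. A DBGF hardware breakpoint wins over the guest's own DRx
 * for each of the four breakpoint slots; otherwise the hyper value is kept.
 * Shown here for slot 0 with a hypothetical helper name.
 */
#if 0 /* example sketch */
static uint64_t examplePickDr0(PVM pVM, PVMCPU pVCpu, RTGCUINTREG uDbgfDr7, RTGCUINTREG uGstDr7)
{
    if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return DBGFBpGetDR0(pVM);           /* debugger breakpoint first */
    if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return CPUMGetGuestDR0(pVCpu);      /* then the guest's own */
    return CPUMGetHyperDR0(pVCpu);          /* else keep the hyper value */
}
#endif
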
#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */

/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change the CS and SS selectors from RPL=0 to RPL=1.
 *
 * @returns VBox status. (recompiler failure)
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Assert(!pVM->cpum.s.fRawEntered);
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);

    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Enter execution mode.
         */
        PATMRawEnter(pVM, pCtxCore);

        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss |= 1;
        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
            pCtxCore->cs |= 1;
    }
    else
    {
        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                  ("ring-1 code not supported\n"));
        /*
         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
         */
        PATMRawEnter(pVM, pCtxCore);
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
                     || pCtxCore->eflags.Bits.u1VM,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
    Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */

    pVM->cpum.s.fRawEntered = true;
    return VINF_SUCCESS;
}


/**
 * Transforms the guest CPU state from raw-ring mode to correct values.
 *
 * This function will change any selector registers with RPL=1 back to RPL=0.
 *
 * @returns Adjusted rc.
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @param   rc          Raw mode return code.
 * @see     @ref pg_raw
 */
VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Don't leave if we've already left (in GC).
     */
    Assert(pVM->cpum.s.fRawEntered);
    if (!pVM->cpum.s.fRawEntered)
        return rc;
    pVM->cpum.s.fRawEntered = false;

    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(pCtx);
    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));

    /*
     * Are we executing in raw ring-1?
     */
    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Leave execution mode.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
        /** @todo See what happens if we remove this. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;

        /*
         * Ring-1 selector => Ring-0.
         */
        pCtxCore->ss &= ~X86_SEL_RPL;
        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
            pCtxCore->cs &= ~X86_SEL_RPL;
    }
    else
    {
        /*
         * PATM is taking care of the IOPL and IF flags for us.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        if (!pCtxCore->eflags.Bits.u1VM)
        {
            /** @todo See what happens if we remove this. */
            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
                pCtxCore->ds &= ~X86_SEL_RPL;
            if ((pCtxCore->es & X86_SEL_RPL) == 1)
                pCtxCore->es &= ~X86_SEL_RPL;
            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
                pCtxCore->fs &= ~X86_SEL_RPL;
            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
                pCtxCore->gs &= ~X86_SEL_RPL;
        }
    }

    return rc;
}

/**
 * Updates the EFLAGS while we're in raw-mode.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    The context core.
 * @param   eflags      The new EFLAGS value.
 */
VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (!pVM->cpum.s.fRawEntered)
    {
        pCtxCore->eflags.u32 = eflags;
        return;
    }
    PATMRawSetEFlags(pVM, pCtxCore, eflags);
}

#endif /* !IN_RING0 */

/**
 * Gets the EFLAGS while we're in raw-mode.
 *
 * @returns The eflags.
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    The context core.
 */
VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
#ifdef IN_RING0
    return pCtxCore->eflags.u32;
#else
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (!pVM->cpum.s.fRawEntered)
        return pCtxCore->eflags.u32;
    return PATMRawGetEFlags(pVM, pCtxCore);
#endif
}


/**
 * Gets and resets the changed flags (CPUM_CHANGED_*).
 * Only REM should call this function.
 *
 * @returns The changed flags.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
{
    unsigned fFlags = pVCpu->cpum.s.fChanged;
    pVCpu->cpum.s.fChanged = 0;
    /** @todo change the switcher to use the fChanged flags. */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
    {
        fFlags |= CPUM_CHANGED_FPU_REM;
        pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
    }
    return fFlags;
}


/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVCpu           The VMCPU handle.
 * @param   fChangedFlags   The flags to set.
 */
VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
{
    pVCpu->cpum.s.fChanged |= fChangedFlags;
}


/**
 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
 * @returns true if supported.
 * @returns false if not supported.
 * @param   pVM     The VM handle.
 */
VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
{
    return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
}


/**
 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
{
    return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
}


/**
 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
{
    return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
}

#ifndef IN_RING3

/**
 * Lazily sync in the FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
{
    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
}

#endif /* !IN_RING3 */

/**
 * Checks if we activated the FPU/XMM state of the guest OS.
 * @returns true if we did.
 * @returns false if not.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
{
    return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
}


/**
 * Deactivate the FPU/XMM state of the guest OS.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
{
    pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
}


/**
 * Checks if the guest debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
{
    return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
}

/**
 * Checks if the hyper debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
{
    return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
}


/**
 * Mark the guest's debug state as inactive.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
{
    pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
}


/**
 * Mark the hypervisor's debug state as inactive.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
{
    pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
}

/**
 * Checks if the hidden selector registers are valid.
 * @returns true if they are.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
{
    return HWACCMIsEnabled(pVM);
}


/**
 * Get the current privilege level of the guest.
 *
 * @returns cpl
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    The context core (trap register frame).
 */
VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    uint32_t cpl;

    if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
    {
        /*
         * The hidden CS.DPL register is always equal to the CPL, it is
         * not affected by loading a conforming coding segment.
         *
         * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
         * at SS. (ACP2 regression during install after a far call to ring 2)
         */
        if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
            cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
        else
            cpl = 0;    /* CPL set to 3 for VT-x real-mode emulation. */
    }
    else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
    {
        if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
        {
            /*
             * The SS RPL is always equal to the CPL, while the CS RPL
             * isn't necessarily equal if the segment is conforming.
             * See section 4.11.1 in the AMD manual.
             */
            cpl = (pCtxCore->ss & X86_SEL_RPL);
#ifndef IN_RING0
            if (cpl == 1)
                cpl = 0;
#endif
        }
        else
            cpl = 3;
    }
    else
        cpl = 0;        /* real mode; cpl is zero */

    return cpl;
}


/**
 * Gets the current guest CPU mode.
 *
 * If paging mode is what you need, check out PGMGetGuestMode().
 *
 * @returns The CPU mode.
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
{
    CPUMMODE enmMode;
    if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
        enmMode = CPUMMODE_REAL;
    else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        enmMode = CPUMMODE_PROTECTED;
    else
        enmMode = CPUMMODE_LONG;

    return enmMode;
}

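/*
 * Illustrative only -- not part of the original file: a minimal sketch
 * combining the two queries above to log the guest's execution state.
 */
#if 0 /* example sketch */
static void exampleLogGuestState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    CPUMMODE enmMode = CPUMGetGuestMode(pVCpu);
    uint32_t cpl     = CPUMGetGuestCPL(pVCpu, pCtxCore);
    Log(("guest mode=%d cpl=%u\n", enmMode, cpl));
}
#endif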