VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 771

Last change on this file since 771 was 771, checked in by vboxsync, 18 years ago

AMD-V was still left disabled. Now enabled.
Enabled sysenter/sysexit in hwaccm mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 36.9 KB
1/* $Id: CPUMAllRegs.cpp 771 2007-02-08 10:41:53Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37
38
39
40/** Disable stack frame pointer generation here. */
41#if defined(_MSC_VER) && !defined(DEBUG)
42# pragma optimize("y", off)
43#endif
44
45
46/**
47 * Sets or resets an alternative hypervisor context core.
48 *
49 * This is called when we get a hypervisor trap, to switch the context
50 * core to the trap frame on the stack. It is called again to reset
51 * back to the default context core when resuming hypervisor execution.
52 *
53 * @param pVM The VM handle.
54 * @param pCtxCore Pointer to the alternative context core or NULL
55 * to go back to the default context core.
56 */
57CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
58{
59 LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVM->cpum.s.CTXSUFF(pHyperCore), pCtxCore));
60 if (!pCtxCore)
61 {
62 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
63#ifdef IN_GC
64 pVM->cpum.s.pHyperCoreHC = VM_HOST_ADDR(pVM, pCtxCore);
65#else
66 pVM->cpum.s.pHyperCoreGC = VM_GUEST_ADDR(pVM, pCtxCore);
67#endif
68 }
69 else
70 {
71#ifdef IN_GC
72 pVM->cpum.s.pHyperCoreHC = MMHyperGC2HC(pVM, pCtxCore);
73#else
74 pVM->cpum.s.pHyperCoreGC = MMHyperHC2GC(pVM, pCtxCore);
75#endif
76 }
77 pVM->cpum.s.CTXSUFF(pHyperCore) = pCtxCore;
78}
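/*
 * A minimal, illustrative usage sketch: a hypervisor trap handler could
 * temporarily switch to the trap frame and restore the default context core
 * afterwards. The handler name and the pTrapFrame parameter are assumptions;
 * only the CPUM calls are taken from this file.
 *
 * @code
 *  static void exampleHyperTrapHandler(PVM pVM, PCPUMCTXCORE pTrapFrame)
 *  {
 *      CPUMHyperSetCtxCore(pVM, pTrapFrame);   // read/write registers through the trap frame
 *      Log(("example: hyper trap at eip=%RX32\n", CPUMGetHyperEIP(pVM)));
 *      CPUMHyperSetCtxCore(pVM, NULL);         // back to the default hypervisor context core
 *  }
 * @endcode
 */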
79
80
81/**
82 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
83 * This is only for reading in order to save a few calls.
84 *
85 * @param pVM Handle to the virtual machine.
86 */
87CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
88{
89 return pVM->cpum.s.CTXSUFF(pHyperCore);
90}
91
92
93/**
94 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
95 *
96 * @returns VBox status code.
97 * @param pVM Handle to the virtual machine.
98 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
99 *
100 * @deprecated This does *not* (and never has) give the right picture of the
101 * hypervisor register state. With CPUMHyperSetCtxCore() this is
102 * getting much worse. So, use the individual functions for getting
103 * and especially for setting the hypervisor registers.
104 */
105CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
106{
107 *ppCtx = &pVM->cpum.s.Hyper;
108 return VINF_SUCCESS;
109}
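/*
 * As the deprecation note above suggests, prefer the explicit accessors. A
 * minimal sketch (the register values are arbitrary examples):
 *
 * @code
 *  CPUMSetHyperEIP(pVM, 0x1000);
 *  CPUMSetHyperESP(pVM, 0x2000);
 *  uint32_t u32Eip = CPUMGetHyperEIP(pVM);     // instead of digging through CPUMQueryHyperCtxPtr()
 * @endcode
 */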
110
111CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
112{
113 pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
114 pVM->cpum.s.Hyper.gdtr.pGdt = addr;
115 pVM->cpum.s.Hyper.gdtrPadding = 0;
116 pVM->cpum.s.Hyper.gdtrPadding64 = 0;
117}
118
119CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
120{
121 pVM->cpum.s.Hyper.idtr.cbIdt = limit;
122 pVM->cpum.s.Hyper.idtr.pIdt = addr;
123 pVM->cpum.s.Hyper.idtrPadding = 0;
124 pVM->cpum.s.Hyper.idtrPadding64 = 0;
125}
126
127CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
128{
129 pVM->cpum.s.Hyper.cr3 = cr3;
130}
131
132CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
133{
134 pVM->cpum.s.CTXSUFF(pHyperCore)->cs = SelCS;
135}
136
137CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
138{
139 pVM->cpum.s.CTXSUFF(pHyperCore)->ds = SelDS;
140}
141
142CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
143{
144 pVM->cpum.s.CTXSUFF(pHyperCore)->es = SelES;
145}
146
147CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
148{
149 pVM->cpum.s.CTXSUFF(pHyperCore)->fs = SelFS;
150}
151
152CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
153{
154 pVM->cpum.s.CTXSUFF(pHyperCore)->gs = SelGS;
155}
156
157CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
158{
159 pVM->cpum.s.CTXSUFF(pHyperCore)->ss = SelSS;
160}
161
162CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
163{
164 pVM->cpum.s.CTXSUFF(pHyperCore)->esp = u32ESP;
165}
166
167CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
168{
169 pVM->cpum.s.CTXSUFF(pHyperCore)->eflags.u32 = Efl;
170 return VINF_SUCCESS;
171}
172
173CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
174{
175 pVM->cpum.s.CTXSUFF(pHyperCore)->eip = u32EIP;
176}
177
178CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
179{
180 pVM->cpum.s.Hyper.tr = SelTR;
181}
182
183CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
184{
185 pVM->cpum.s.Hyper.ldtr = SelLDTR;
186}
187
188CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
189{
190 pVM->cpum.s.Hyper.dr0 = uDr0;
191 /** @todo in GC we must load it! */
192}
193
194CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
195{
196 pVM->cpum.s.Hyper.dr1 = uDr1;
197 /** @todo in GC we must load it! */
198}
199
200CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
201{
202 pVM->cpum.s.Hyper.dr2 = uDr2;
203 /** @todo in GC we must load it! */
204}
205
206CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
207{
208 pVM->cpum.s.Hyper.dr3 = uDr3;
209 /** @todo in GC we must load it! */
210}
211
212CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
213{
214 pVM->cpum.s.Hyper.dr6 = uDr6;
215 /** @todo in GC we must load it! */
216}
217
218CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
219{
220 pVM->cpum.s.Hyper.dr7 = uDr7;
221 /** @todo in GC we must load it! */
222}
223
224
225CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
226{
227 return pVM->cpum.s.CTXSUFF(pHyperCore)->cs;
228}
229
230CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
231{
232 return pVM->cpum.s.CTXSUFF(pHyperCore)->ds;
233}
234
235CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
236{
237 return pVM->cpum.s.CTXSUFF(pHyperCore)->es;
238}
239
240CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
241{
242 return pVM->cpum.s.CTXSUFF(pHyperCore)->fs;
243}
244
245CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
246{
247 return pVM->cpum.s.CTXSUFF(pHyperCore)->gs;
248}
249
250CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
251{
252 return pVM->cpum.s.CTXSUFF(pHyperCore)->ss;
253}
254
255#if 0 /* these are not correct. */
256
257CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
258{
259 return pVM->cpum.s.Hyper.cr0;
260}
261
262CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
263{
264 return pVM->cpum.s.Hyper.cr2;
265}
266
267CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
268{
269 return pVM->cpum.s.Hyper.cr3;
270}
271
272CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
273{
274 return pVM->cpum.s.Hyper.cr4;
275}
276
277#endif /* not correct */
278
279CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
280{
281 return pVM->cpum.s.CTXSUFF(pHyperCore)->eax;
282}
283
284CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
285{
286 return pVM->cpum.s.CTXSUFF(pHyperCore)->ebx;
287}
288
289CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
290{
291 return pVM->cpum.s.CTXSUFF(pHyperCore)->ecx;
292}
293
294CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
295{
296 return pVM->cpum.s.CTXSUFF(pHyperCore)->edx;
297}
298
299CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
300{
301 return pVM->cpum.s.CTXSUFF(pHyperCore)->esi;
302}
303
304CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
305{
306 return pVM->cpum.s.CTXSUFF(pHyperCore)->edi;
307}
308
309CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
310{
311 return pVM->cpum.s.CTXSUFF(pHyperCore)->ebp;
312}
313
314CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
315{
316 return pVM->cpum.s.CTXSUFF(pHyperCore)->esp;
317}
318
319CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
320{
321 return pVM->cpum.s.CTXSUFF(pHyperCore)->eflags.u32;
322}
323
324CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
325{
326 return pVM->cpum.s.CTXSUFF(pHyperCore)->eip;
327}
328
329CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
330{
331 if (pcbLimit)
332 *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
333 return pVM->cpum.s.Hyper.idtr.pIdt;
334}
335
336CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
337{
338 if (pcbLimit)
339 *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
340 return pVM->cpum.s.Hyper.gdtr.pGdt;
341}
342
343CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
344{
345 return pVM->cpum.s.Hyper.ldtr;
346}
347
348CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
349{
350 return pVM->cpum.s.Hyper.dr0;
351}
352
353CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
354{
355 return pVM->cpum.s.Hyper.dr1;
356}
357
358CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
359{
360 return pVM->cpum.s.Hyper.dr2;
361}
362
363CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
364{
365 return pVM->cpum.s.Hyper.dr3;
366}
367
368CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
369{
370 return pVM->cpum.s.Hyper.dr6;
371}
372
373CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
374{
375 return pVM->cpum.s.Hyper.dr7;
376}
377
378
379/**
380 * Gets the pointer to the internal CPUMCTXCORE structure.
381 * This is only for reading in order to save a few calls.
382 *
383 * @param pVM Handle to the virtual machine.
384 */
385CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
386{
387 return CPUMCTX2CORE(&pVM->cpum.s.Guest);
388}
389
390
391/**
392 * Sets the guest context core registers.
393 *
394 * @param pVM Handle to the virtual machine.
395 * @param pCtxCore The new context core values.
396 */
397CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
398{
399 /** @todo #1410 requires selectors to be checked. */
400
401 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->cpum.s.Guest);
402 *pCtxCoreDst = *pCtxCore;
403}
404
405
406/**
407 * Queries the pointer to the internal CPUMCTX structure
408 *
409 * @returns VBox status code.
410 * @param pVM Handle to the virtual machine.
411 * @param ppCtx Receives the CPUMCTX pointer when successful.
412 */
413CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
414{
415 *ppCtx = &pVM->cpum.s.Guest;
416 return VINF_SUCCESS;
417}
418
419
420CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
421{
422 pVM->cpum.s.Guest.gdtr.cbGdt = limit;
423 pVM->cpum.s.Guest.gdtr.pGdt = addr;
424 pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
425 return VINF_SUCCESS;
426}
427
428CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
429{
430 pVM->cpum.s.Guest.idtr.cbIdt = limit;
431 pVM->cpum.s.Guest.idtr.pIdt = addr;
432 pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
433 return VINF_SUCCESS;
434}
435
436CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
437{
438 pVM->cpum.s.Guest.tr = tr;
439 pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
440 return VINF_SUCCESS;
441}
442
443CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
444{
445 pVM->cpum.s.Guest.ldtr = ldtr;
446 pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
447 return VINF_SUCCESS;
448}
449
450
451CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint32_t cr0)
452{
453 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
454 != (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
455 pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
456 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;
457 pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
458 return VINF_SUCCESS;
459}
460
461CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint32_t cr2)
462{
463 pVM->cpum.s.Guest.cr2 = cr2;
464 return VINF_SUCCESS;
465}
466
467CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint32_t cr3)
468{
469 pVM->cpum.s.Guest.cr3 = cr3;
470 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
471 return VINF_SUCCESS;
472}
473
474CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint32_t cr4)
475{
476 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
477 != (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
478 pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
479 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
480 if (!CPUMSupportsFXSR(pVM))
481 cr4 &= ~X86_CR4_OSFSXR;
482 pVM->cpum.s.Guest.cr4 = cr4;
483 return VINF_SUCCESS;
484}
485
486CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
487{
488 pVM->cpum.s.Guest.eflags.u32 = eflags;
489 return VINF_SUCCESS;
490}
491
492CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
493{
494 pVM->cpum.s.Guest.eip = eip;
495 return VINF_SUCCESS;
496}
497
498CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
499{
500 pVM->cpum.s.Guest.eax = eax;
501 return VINF_SUCCESS;
502}
503
504CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
505{
506 pVM->cpum.s.Guest.ebx = ebx;
507 return VINF_SUCCESS;
508}
509
510CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
511{
512 pVM->cpum.s.Guest.ecx = ecx;
513 return VINF_SUCCESS;
514}
515
516CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
517{
518 pVM->cpum.s.Guest.edx = edx;
519 return VINF_SUCCESS;
520}
521
522CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
523{
524 pVM->cpum.s.Guest.esp = esp;
525 return VINF_SUCCESS;
526}
527
528CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
529{
530 pVM->cpum.s.Guest.ebp = ebp;
531 return VINF_SUCCESS;
532}
533
534CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
535{
536 pVM->cpum.s.Guest.esi = esi;
537 return VINF_SUCCESS;
538}
539
540CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
541{
542 pVM->cpum.s.Guest.edi = edi;
543 return VINF_SUCCESS;
544}
545
546CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
547{
548 pVM->cpum.s.Guest.ss = ss;
549 return VINF_SUCCESS;
550}
551
552CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
553{
554 pVM->cpum.s.Guest.cs = cs;
555 return VINF_SUCCESS;
556}
557
558CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
559{
560 pVM->cpum.s.Guest.ds = ds;
561 return VINF_SUCCESS;
562}
563
564CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
565{
566 pVM->cpum.s.Guest.es = es;
567 return VINF_SUCCESS;
568}
569
570CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
571{
572 pVM->cpum.s.Guest.fs = fs;
573 return VINF_SUCCESS;
574}
575
576CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
577{
578 pVM->cpum.s.Guest.gs = gs;
579 return VINF_SUCCESS;
580}
581
582
583CPUMDECL(uint32_t) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
584{
585 if (pcbLimit)
586 *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
587 return pVM->cpum.s.Guest.idtr.pIdt;
588}
589
590CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
591{
592 return pVM->cpum.s.Guest.tr;
593}
594
595CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
596{
597 return pVM->cpum.s.Guest.cs;
598}
599
600CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
601{
602 return pVM->cpum.s.Guest.ds;
603}
604
605CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
606{
607 return pVM->cpum.s.Guest.es;
608}
609
610CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
611{
612 return pVM->cpum.s.Guest.fs;
613}
614
615CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
616{
617 return pVM->cpum.s.Guest.gs;
618}
619
620CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
621{
622 return pVM->cpum.s.Guest.ss;
623}
624
625CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
626{
627 return pVM->cpum.s.Guest.ldtr;
628}
629
630
631CPUMDECL(uint32_t) CPUMGetGuestCR0(PVM pVM)
632{
633 return pVM->cpum.s.Guest.cr0;
634}
635
636CPUMDECL(uint32_t) CPUMGetGuestCR2(PVM pVM)
637{
638 return pVM->cpum.s.Guest.cr2;
639}
640
641CPUMDECL(uint32_t) CPUMGetGuestCR3(PVM pVM)
642{
643 return pVM->cpum.s.Guest.cr3;
644}
645
646CPUMDECL(uint32_t) CPUMGetGuestCR4(PVM pVM)
647{
648 return pVM->cpum.s.Guest.cr4;
649}
650
651CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
652{
653 *pGDTR = pVM->cpum.s.Guest.gdtr;
654}
655
656CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
657{
658 return pVM->cpum.s.Guest.eip;
659}
660
661CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
662{
663 return pVM->cpum.s.Guest.eax;
664}
665
666CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
667{
668 return pVM->cpum.s.Guest.ebx;
669}
670
671CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
672{
673 return pVM->cpum.s.Guest.ecx;
674}
675
676CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
677{
678 return pVM->cpum.s.Guest.edx;
679}
680
681CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
682{
683 return pVM->cpum.s.Guest.esi;
684}
685
686CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
687{
688 return pVM->cpum.s.Guest.edi;
689}
690
691CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
692{
693 return pVM->cpum.s.Guest.esp;
694}
695
696CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
697{
698 return pVM->cpum.s.Guest.ebp;
699}
700
701CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
702{
703 return pVM->cpum.s.Guest.eflags.u32;
704}
705
706CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
707{
708 return &pVM->cpum.s.Guest.trHid;
709}
710
711/** @todo crx should be an array */
712CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
713{
714 switch (iReg)
715 {
716 case USE_REG_CR0:
717 *pValue = pVM->cpum.s.Guest.cr0;
718 break;
719 case USE_REG_CR2:
720 *pValue = pVM->cpum.s.Guest.cr2;
721 break;
722 case USE_REG_CR3:
723 *pValue = pVM->cpum.s.Guest.cr3;
724 break;
725 case USE_REG_CR4:
726 *pValue = pVM->cpum.s.Guest.cr4;
727 break;
728 default:
729 return VERR_INVALID_PARAMETER;
730 }
731 return VINF_SUCCESS;
732}
733
734CPUMDECL(RTUINTREG) CPUMGetGuestDR0(PVM pVM)
735{
736 return pVM->cpum.s.Guest.dr0;
737}
738
739CPUMDECL(RTUINTREG) CPUMGetGuestDR1(PVM pVM)
740{
741 return pVM->cpum.s.Guest.dr1;
742}
743
744CPUMDECL(RTUINTREG) CPUMGetGuestDR2(PVM pVM)
745{
746 return pVM->cpum.s.Guest.dr2;
747}
748
749CPUMDECL(RTUINTREG) CPUMGetGuestDR3(PVM pVM)
750{
751 return pVM->cpum.s.Guest.dr3;
752}
753
754CPUMDECL(RTUINTREG) CPUMGetGuestDR6(PVM pVM)
755{
756 return pVM->cpum.s.Guest.dr6;
757}
758
759CPUMDECL(RTUINTREG) CPUMGetGuestDR7(PVM pVM)
760{
761 return pVM->cpum.s.Guest.dr7;
762}
763
764/** @todo drx should be an array */
765CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
766{
767 switch (iReg)
768 {
769 case USE_REG_DR0:
770 *pValue = pVM->cpum.s.Guest.dr0;
771 break;
772 case USE_REG_DR1:
773 *pValue = pVM->cpum.s.Guest.dr1;
774 break;
775 case USE_REG_DR2:
776 *pValue = pVM->cpum.s.Guest.dr2;
777 break;
778 case USE_REG_DR3:
779 *pValue = pVM->cpum.s.Guest.dr3;
780 break;
781 case USE_REG_DR4:
782 case USE_REG_DR6:
783 *pValue = pVM->cpum.s.Guest.dr6;
784 break;
785 case USE_REG_DR5:
786 case USE_REG_DR7:
787 *pValue = pVM->cpum.s.Guest.dr7;
788 break;
789
790 default:
791 return VERR_INVALID_PARAMETER;
792 }
793 return VINF_SUCCESS;
794}
795
796/**
797 * Gets a CpuId leaf.
798 *
799 * @param pVM The VM handle.
800 * @param iLeaf The CPUID leaf to get.
801 * @param pEax Where to store the EAX value.
802 * @param pEbx Where to store the EBX value.
803 * @param pEcx Where to store the ECX value.
804 * @param pEdx Where to store the EDX value.
805 */
806CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
807{
808 PCCPUMCPUID pCpuId;
809 if (iLeaf < ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
810 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
811 else if (iLeaf - UINT32_C(0x80000000) < ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
812 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
813 else
814 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
815
816 *pEax = pCpuId->eax;
817 *pEbx = pCpuId->ebx;
818 *pEcx = pCpuId->ecx;
819 *pEdx = pCpuId->edx;
820 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
821}
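/*
 * A short caller-side sketch (illustrative): querying standard leaf 1 and
 * testing a feature bit; out-of-range leaves fall back to the GuestCpuIdDef
 * values as implemented above.
 *
 * @code
 *  uint32_t uEAX, uEBX, uECX, uEDX;
 *  CPUMGetGuestCpuId(pVM, 1, &uEAX, &uEBX, &uECX, &uEDX);
 *  bool fGuestHasSep = (uEDX & X86_CPUID_FEATURE_EDX_SEP) != 0;
 * @endcode
 */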
822
823/**
824 * Gets a pointer to the array of standard CPUID leafs.
825 *
826 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
827 *
828 * @returns Pointer to the standard CPUID leafs (read-only).
829 * @param pVM The VM handle.
830 * @remark Intended for PATM.
831 */
832CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
833{
834 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
835}
836
837/**
838 * Gets a pointer to the array of extended CPUID leafs.
839 *
840 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
841 *
842 * @returns Pointer to the extended CPUID leafs (read-only).
843 * @param pVM The VM handle.
844 * @remark Intended for PATM.
845 */
846CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
847{
848 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
849}
850
851/**
852 * Gets a pointer to the default CPUID leaf.
853 *
854 * @returns Pointer to the default CPUID leaf (read-only).
855 * @param pVM The VM handle.
856 * @remark Intended for PATM.
857 */
858CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
859{
860 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
861}
862
863/**
864 * Gets the number of standard CPUID leafs.
865 *
866 * @returns Number of leafs.
867 * @param pVM The VM handle.
868 * @remark Intended for PATM.
869 */
870CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
871{
872 return ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
873}
874
875/**
876 * Gets the number of extended CPUID leafs.
877 *
878 * @returns Number of leafs.
879 * @param pVM The VM handle.
880 * @remark Intended for PATM.
881 */
882CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
883{
884 return ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
885}
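/*
 * Illustrative sketch of how the bounds above can be combined with
 * CPUMGetGuestCpuId() to dump every standard leaf (loop variable and log
 * format are example choices):
 *
 * @code
 *  for (uint32_t iLeaf = 0; iLeaf < CPUMGetGuestCpuIdStdMax(pVM); iLeaf++)
 *  {
 *      uint32_t uEAX, uEBX, uECX, uEDX;
 *      CPUMGetGuestCpuId(pVM, iLeaf, &uEAX, &uEBX, &uECX, &uEDX);
 *      Log(("std leaf %#x: %RX32 %RX32 %RX32 %RX32\n", iLeaf, uEAX, uEBX, uECX, uEDX));
 *  }
 * @endcode
 */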
886
887/**
888 * Sets a CPUID feature bit.
889 *
890 * @param pVM The VM Handle.
891 * @param enmFeature The feature to set.
892 */
893CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
894{
895 switch (enmFeature)
896 {
897 /*
898 * Set the APIC bit in both feature masks.
899 */
900 case CPUMCPUIDFEATURE_APIC:
901 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
902 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
903 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
904 && pVM->cpum.s.aGuestCpuIdExt[1].edx)
905 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
906 Log(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
907 break;
908
909 /*
910 * Set the sysenter/sysexit bit in both feature masks.
911 * Assumes the caller knows what it's doing! (host must support these)
912 */
913 case CPUMCPUIDFEATURE_SEP:
914 {
915 uint32_t ulEdx, ulDummy;
916
917 ASMCpuId(1, &ulDummy, &ulDummy, &ulDummy, &ulEdx);
918 if (!(ulEdx & X86_CPUID_FEATURE_EDX_SEP))
919 {
920 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
921 return;
922 }
923
924 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
925 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
926 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
927 && pVM->cpum.s.aGuestCpuIdExt[1].edx)
928 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
929 Log(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
930 break;
931 }
932
933 default:
934 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
935 break;
936 }
937}
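/*
 * A hedged example of how initialization code might call the setter above:
 * the SEP case already refuses to set the bit when the host CPU lacks it, so
 * the caller merely states intent.
 *
 * @code
 *  CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
 *  CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);   // no-op (with an assertion) if the host lacks SEP
 * @endcode
 */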
938
939/**
940 * Clears a CPUID feature bit.
941 *
942 * @param pVM The VM Handle.
943 * @param enmFeature The feature to clear.
944 */
945CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
946{
947 switch (enmFeature)
948 {
949 /*
950 * Clear the APIC bit in both feature masks.
951 */
952 case CPUMCPUIDFEATURE_APIC:
953 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
954 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
955 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
956 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
957 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
958 break;
959
960 default:
961 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
962 break;
963 }
964}
965
966
967
968CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, RTGCUINTREG uDr0)
969{
970 pVM->cpum.s.Guest.dr0 = uDr0;
971 return CPUMRecalcHyperDRx(pVM);
972}
973
974CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, RTGCUINTREG uDr1)
975{
976 pVM->cpum.s.Guest.dr1 = uDr1;
977 return CPUMRecalcHyperDRx(pVM);
978}
979
980CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, RTGCUINTREG uDr2)
981{
982 pVM->cpum.s.Guest.dr2 = uDr2;
983 return CPUMRecalcHyperDRx(pVM);
984}
985
986CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, RTGCUINTREG uDr3)
987{
988 pVM->cpum.s.Guest.dr3 = uDr3;
989 return CPUMRecalcHyperDRx(pVM);
990}
991
992CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, RTGCUINTREG uDr6)
993{
994 pVM->cpum.s.Guest.dr6 = uDr6;
995 return CPUMRecalcHyperDRx(pVM);
996}
997
998CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, RTGCUINTREG uDr7)
999{
1000 pVM->cpum.s.Guest.dr7 = uDr7;
1001 return CPUMRecalcHyperDRx(pVM);
1002}
1003
1004/** @todo drx should be an array */
1005CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint32_t Value)
1006{
1007 switch (iReg)
1008 {
1009 case USE_REG_DR0:
1010 pVM->cpum.s.Guest.dr0 = Value;
1011 break;
1012 case USE_REG_DR1:
1013 pVM->cpum.s.Guest.dr1 = Value;
1014 break;
1015 case USE_REG_DR2:
1016 pVM->cpum.s.Guest.dr2 = Value;
1017 break;
1018 case USE_REG_DR3:
1019 pVM->cpum.s.Guest.dr3 = Value;
1020 break;
1021 case USE_REG_DR4:
1022 case USE_REG_DR6:
1023 pVM->cpum.s.Guest.dr6 = Value;
1024 break;
1025 case USE_REG_DR5:
1026 case USE_REG_DR7:
1027 pVM->cpum.s.Guest.dr7 = Value;
1028 break;
1029
1030 default:
1031 return VERR_INVALID_PARAMETER;
1032 }
1033 return CPUMRecalcHyperDRx(pVM);
1034}
1035
1036
1037/**
1038 * Recalculates the hypervisor DRx register values based on
1039 * current guest registers and DBGF breakpoints.
1040 *
1041 * This is called whenever a guest DRx register is modified and when DBGF
1042 * sets a hardware breakpoint. In guest context this function will reload
1043 * any (hyper) DRx registers which come out with a different value.
1044 *
1045 * @returns VINF_SUCCESS.
1046 * @param pVM The VM handle.
1047 */
1048CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
1049{
1050 /*
1051 * Compare the DR7s first.
1052 *
1053 * We only care about the enabled flags. The GE and LE flags are always
1054 * set and we don't care if the guest doesn't set them. GD is virtualized
1055 * when we dispatch #DB; we never enable it.
1056 */
1057 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1058#ifdef CPUM_VIRTUALIZE_DRX
1059 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
1060#else
1061 const RTGCUINTREG uGstDr7 = 0;
1062#endif
1063 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1064 {
1065 /*
1066 * Ok, something is enabled. Recalc each of the breakpoints.
1067 * Straightforward code, not optimized/minimized in any way.
1068 */
1069 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1070
1071 /* bp 0 */
1072 RTGCUINTREG uNewDr0;
1073 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1074 {
1075 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1076 uNewDr0 = DBGFBpGetDR0(pVM);
1077 }
1078 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1079 {
1080 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1081 uNewDr0 = CPUMGetGuestDR0(pVM);
1082 }
1083 else
1084 uNewDr0 = pVM->cpum.s.Hyper.dr0;
1085
1086 /* bp 1 */
1087 RTGCUINTREG uNewDr1;
1088 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1089 {
1090 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1091 uNewDr1 = DBGFBpGetDR1(pVM);
1092 }
1093 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1094 {
1095 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1096 uNewDr1 = CPUMGetGuestDR1(pVM);
1097 }
1098 else
1099 uNewDr1 = pVM->cpum.s.Hyper.dr1;
1100
1101 /* bp 2 */
1102 RTGCUINTREG uNewDr2;
1103 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1104 {
1105 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1106 uNewDr2 = DBGFBpGetDR2(pVM);
1107 }
1108 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1109 {
1110 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1111 uNewDr2 = CPUMGetGuestDR2(pVM);
1112 }
1113 else
1114 uNewDr2 = pVM->cpum.s.Hyper.dr2;
1115
1116 /* bp 3 */
1117 RTGCUINTREG uNewDr3;
1118 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1119 {
1120 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1121 uNewDr3 = DBGFBpGetDR3(pVM);
1122 }
1123 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1124 {
1125 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1126 uNewDr3 = CPUMGetGuestDR3(pVM);
1127 }
1128 else
1129 uNewDr3 = pVM->cpum.s.Hyper.dr3;
1130
1131 /*
1132 * Apply the updates.
1133 */
1134#ifdef IN_GC
1135 if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1136 {
1137 /** @todo save host DRx registers. */
1138 }
1139#endif
1140 pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1141 if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
1142 CPUMSetHyperDR3(pVM, uNewDr3);
1143 if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
1144 CPUMSetHyperDR2(pVM, uNewDr2);
1145 if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
1146 CPUMSetHyperDR1(pVM, uNewDr1);
1147 if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
1148 CPUMSetHyperDR0(pVM, uNewDr0);
1149 if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
1150 CPUMSetHyperDR7(pVM, uNewDr7);
1151 }
1152 else
1153 {
1154#ifdef IN_GC
1155 if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1156 {
1157 /** @todo restore host DRx registers. */
1158 }
1159#endif
1160 pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1161 }
1162 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1163 pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
1164 pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
1165 pVM->cpum.s.Hyper.dr7));
1166
1167 return VINF_SUCCESS;
1168}
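/*
 * Illustrative sketch: guest DRx writes funnel through the CPUMSetGuestDRx
 * setters above, each of which ends in this recalculation. Emulating a guest
 * "mov dr7, eax" could therefore look roughly like this (uGuestEax is a
 * hypothetical variable holding the guest's EAX value):
 *
 * @code
 *  int rc = CPUMSetGuestDR7(pVM, uGuestEax);   // updates Guest.dr7, then recalcs the hyper DRx set
 *  AssertRC(rc);
 * @endcode
 */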
1169
1170#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1171
1172/**
1173 * Transforms the guest CPU state to raw-ring mode.
1174 *
1175 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1176 *
1177 * @returns VBox status. (recompiler failure)
1178 * @param pVM VM handle.
1179 * @param pCtxCore The context core (for trap usage).
1180 * @see @ref pg_raw
1181 */
1182CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
1183{
1184 Assert(!pVM->cpum.s.fRawEntered);
1185 if (!pCtxCore)
1186 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);
1187
1188 /*
1189 * Are we in Ring-0?
1190 */
1191 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1192 && !pCtxCore->eflags.Bits.u1VM)
1193 {
1194 /*
1195 * Enter execution mode.
1196 */
1197 PATMRawEnter(pVM, pCtxCore);
1198
1199 /*
1200 * Set CPL to Ring-1.
1201 */
1202 pCtxCore->ss |= 1;
1203 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1204 pCtxCore->cs |= 1;
1205 }
1206 else
1207 {
1208 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 && !pCtxCore->eflags.Bits.u1VM,
1209 ("ring-1 code not supported\n"));
1210 /*
1211 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1212 */
1213 PATMRawEnter(pVM, pCtxCore);
1214 }
1215
1216 /*
1217 * Assert sanity.
1218 */
1219 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1220 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1221 || pCtxCore->eflags.Bits.u1VM,
1222 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1223 Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1224 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1225
1226 pVM->cpum.s.fRawEntered = true;
1227 return VINF_SUCCESS;
1228}
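/*
 * A sketch of the intended call pattern for CPUMRawEnter/CPUMRawLeave (the
 * raw-mode execution helper named here is hypothetical; only the two CPUM
 * calls are real):
 *
 * @code
 *  int rc = CPUMRawEnter(pVM, NULL);       // compress guest ring-0 code into ring-1
 *  rc = exampleRunRawModeGuestCode(pVM);   // hypothetical: execute raw-mode guest code
 *  rc = CPUMRawLeave(pVM, NULL, rc);       // undo the ring compression and adjust the status code
 * @endcode
 */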
1229
1230
1231/**
1232 * Transforms the guest CPU state from raw-ring mode to correct values.
1233 *
1234 * This function will change any selector registers with DPL=1 to DPL=0.
1235 *
1236 * @returns Adjusted rc.
1237 * @param pVM VM handle.
1238 * @param rc Raw mode return code
1239 * @param pCtxCore The context core (for trap usage).
1240 * @see @ref pg_raw
1241 */
1242CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
1243{
1244 /*
1245 * Don't leave if we've already left (in GC).
1246 */
1247 Assert(pVM->cpum.s.fRawEntered);
1248 if (!pVM->cpum.s.fRawEntered)
1249 return rc;
1250 pVM->cpum.s.fRawEntered = false;
1251
1252 PCPUMCTX pCtx = &pVM->cpum.s.Guest;
1253 if (!pCtxCore)
1254 pCtxCore = CPUMCTX2CORE(pCtx);
1255 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1256 AssertMsg(pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1257 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1258
1259 /*
1260 * Are we executing in raw ring-1?
1261 */
1262 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1263 && !pCtxCore->eflags.Bits.u1VM)
1264 {
1265 /*
1266 * Leave execution mode.
1267 */
1268 PATMRawLeave(pVM, pCtxCore, rc);
1269 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1270 /** @todo See what happens if we remove this. */
1271 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1272 pCtxCore->ds &= ~X86_SEL_RPL;
1273 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1274 pCtxCore->es &= ~X86_SEL_RPL;
1275 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1276 pCtxCore->fs &= ~X86_SEL_RPL;
1277 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1278 pCtxCore->gs &= ~X86_SEL_RPL;
1279
1280 /*
1281 * Ring-1 selector => Ring-0.
1282 */
1283 pCtxCore->ss &= ~X86_SEL_RPL;
1284 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1285 pCtxCore->cs &= ~X86_SEL_RPL;
1286 }
1287 else
1288 {
1289 /*
1290 * PATM is taking care of the IOPL and IF flags for us.
1291 */
1292 PATMRawLeave(pVM, pCtxCore, rc);
1293 /** @todo See what happens if we remove this. */
1294 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1295 pCtxCore->ds &= ~X86_SEL_RPL;
1296 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1297 pCtxCore->es &= ~X86_SEL_RPL;
1298 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1299 pCtxCore->fs &= ~X86_SEL_RPL;
1300 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1301 pCtxCore->gs &= ~X86_SEL_RPL;
1302 }
1303
1304 return rc;
1305}
1306
1307/**
1308 * Updates the EFLAGS while we're in raw-mode.
1309 *
1310 * @param pVM The VM handle.
1311 * @param pCtxCore The context core.
1312 * @param eflags The new EFLAGS value.
1313 */
1314CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1315{
1316 if (!pVM->cpum.s.fRawEntered)
1317 {
1318 pCtxCore->eflags.u32 = eflags;
1319 return;
1320 }
1321 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1322}
1323
1324#endif /* !IN_RING0 */
1325
1326/**
1327 * Gets the EFLAGS while we're in raw-mode.
1328 *
1329 * @returns The eflags.
1330 * @param pVM The VM handle.
1331 * @param pCtxCore The context core.
1332 */
1333CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1334{
1335#ifdef IN_RING0
1336 return pCtxCore->eflags.u32;
1337#else
1338 if (!pVM->cpum.s.fRawEntered)
1339 return pCtxCore->eflags.u32;
1340 return PATMRawGetEFlags(pVM, pCtxCore);
1341#endif
1342}
1343
1344
1345
1346
1347/**
1348 * Gets and resets the changed flags (CPUM_CHANGED_*).
1349 * Only REM should call this function.
1350 *
1351 * @returns The changed flags.
1352 * @param pVM The VM handle.
1353 */
1354CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
1355{
1356 unsigned fFlags = pVM->cpum.s.fChanged;
1357 pVM->cpum.s.fChanged = 0;
1358 /** @todo change the switcher to use the fChanged flags. */
1359 if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
1360 {
1361 fFlags |= CPUM_CHANGED_FPU_REM;
1362 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
1363 }
1364 return fFlags;
1365}
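/*
 * Sketch of how the recompiler side might consume these flags (the REM resync
 * helpers named here are placeholders, not real functions):
 *
 * @code
 *  unsigned fChanged = CPUMGetAndClearChangedFlagsREM(pVM);
 *  if (fChanged & CPUM_CHANGED_GDTR)
 *      remExampleSyncGdt(pVM);             // hypothetical resync step
 *  if (fChanged & CPUM_CHANGED_FPU_REM)
 *      remExampleSyncFpu(pVM);             // hypothetical resync step
 * @endcode
 */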
1366
1367/**
1368 * Sets the specified changed flags (CPUM_CHANGED_*).
1369 *
1370 * @param pVM The VM handle.
1371 */
1372CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
1373{
1374 pVM->cpum.s.fChanged |= fChangedFlags;
1375}
1376
1377/**
1378 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1379 * @returns true if supported.
1380 * @returns false if not supported.
1381 * @param pVM The VM handle.
1382 */
1383CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1384{
1385 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1386}
1387
1388
1389/**
1390 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1391 * @returns true if used.
1392 * @returns false if not used.
1393 * @param pVM The VM handle.
1394 */
1395CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1396{
1397 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
1398}
1399
1400
1401/**
1402 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1403 * @returns true if used.
1404 * @returns false if not used.
1405 * @param pVM The VM handle.
1406 */
1407CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1408{
1409 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
1410}
1411
1412/**
1413 * Lazily sync in the FPU/XMM state
1414 *
1415 * @returns VBox status code.
1416 * @param pVM VM handle.
1417 */
1418CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
1419{
1420 return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
1421}
1422
1423/**
1424 * Restore host FPU/XMM state
1425 *
1426 * @returns VBox status code.
1427 * @param pVM VM handle.
1428 */
1429CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
1430{
1431 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
1432 return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
1433}
1434
1435/**
1436 * Checks if we activated the FPU/XMM state of the guest OS
1437 * @returns true if we did.
1438 * @returns false if not.
1439 * @param pVM The VM handle.
1440 */
1441CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
1442{
1443 return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
1444}
1445
1446/**
1447 * Deactivate the FPU/XMM state of the guest OS
1448 * @param pVM The VM handle.
1449 */
1450CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
1451{
1452 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
1453}
1454
1455/**
1456 * Checks if the hidden selector registers are valid
1457 * @returns true if they are.
1458 * @returns false if not.
1459 * @param pVM The VM handle.
1460 */
1461CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
1462{
1463 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
1464}
1465
1466/**
1467 * Sets whether the hidden selector registers are valid.
1468 * @param pVM The VM handle.
1469 * @param fValid Valid or not
1470 */
1471CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
1472{
1473 pVM->cpum.s.fValidHiddenSelRegs = fValid;
1474}