VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 400

Last change on this file since 400 was 338, checked in by vboxsync, 18 years ago

CPUMSetGuestDR*() takes an RTGCUINTREG, not an RTUINTREG.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.9 KB
Line 
1/* $Id: CPUMAllRegs.cpp 338 2007-01-25 23:20:25Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37
38
39
40/** Disable stack frame pointer generation here. */
41#if defined(_MSC_VER) && !defined(DEBUG)
42# pragma optimize("y", off)
43#endif
44
45
/**
 * Sets or resets an alternative hypervisor context core.
 *
 * This is called when we get a hypervisor trap and need to switch the
 * context core to the trap frame on the stack. It is called again with
 * NULL to reset back to the default context core when resuming
 * hypervisor execution.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    Pointer to the alternative context core, or NULL
 *                      to go back to the default context core.
 */
CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVM->cpum.s.CTXSUFF(pHyperCore), pCtxCore));
    if (!pCtxCore)
    {
        /* NULL means reset: point back at the core embedded in the hyper context. */
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
#ifdef IN_GC
        /* Keep the pointer for the *other* context in sync: in GC we must
           translate to a host (HC) address... */
        pVM->cpum.s.pHyperCoreHC = VM_HOST_ADDR(pVM, pCtxCore);
#else
        /* ...and in HC to a guest (GC) address. */
        pVM->cpum.s.pHyperCoreGC = VM_GUEST_ADDR(pVM, pCtxCore);
#endif
    }
    else
    {
        /* Alternative core supplied: translate it for the other context.
           (Uses the MM hyper heap translation, so pCtxCore is presumably
           on the hyper heap/stack — TODO confirm with callers.) */
#ifdef IN_GC
        pVM->cpum.s.pHyperCoreHC = MMHyperGC2HC(pVM, pCtxCore);
#else
        pVM->cpum.s.pHyperCoreGC = MMHyperHC2GC(pVM, pCtxCore);
#endif
    }
    /* Finally publish the pointer for the current context. */
    pVM->cpum.s.CTXSUFF(pHyperCore) = pCtxCore;
}
79
80
81/**
82 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
83 * This is only for reading in order to save a few calls.
84 *
85 * @param pVM Handle to the virtual machine.
86 */
87CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
88{
89 return pVM->cpum.s.CTXSUFF(pHyperCore);
90}
91
92
93/**
94 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
95 *
96 * @returns VBox status code.
97 * @param pVM Handle to the virtual machine.
98 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
99 *
100 * @deprecated This will *not* (and has never) given the right picture of the
101 * hypervisor register state. With CPUMHyperSetCtxCore() this is
102 * getting much worse. So, use the individual functions for getting
103 * and esp. setting the hypervisor registers.
104 */
105CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
106{
107 *ppCtx = &pVM->cpum.s.Hyper;
108 return VINF_SUCCESS;
109}
110
111CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
112{
113 pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
114 pVM->cpum.s.Hyper.gdtr.pGdt = addr;
115 pVM->cpum.s.Hyper.gdtrPadding = 0;
116 pVM->cpum.s.Hyper.gdtrPadding64 = 0;
117}
118
119CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
120{
121 pVM->cpum.s.Hyper.idtr.cbIdt = limit;
122 pVM->cpum.s.Hyper.idtr.pIdt = addr;
123 pVM->cpum.s.Hyper.idtrPadding = 0;
124 pVM->cpum.s.Hyper.idtrPadding64 = 0;
125}
126
127CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
128{
129 pVM->cpum.s.Hyper.cr3 = cr3;
130}
131
132CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
133{
134 pVM->cpum.s.CTXSUFF(pHyperCore)->cs = SelCS;
135}
136
137CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
138{
139 pVM->cpum.s.CTXSUFF(pHyperCore)->ds = SelDS;
140}
141
142CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
143{
144 pVM->cpum.s.CTXSUFF(pHyperCore)->es = SelES;
145}
146
147CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
148{
149 pVM->cpum.s.CTXSUFF(pHyperCore)->fs = SelFS;
150}
151
152CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
153{
154 pVM->cpum.s.CTXSUFF(pHyperCore)->gs = SelGS;
155}
156
157CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
158{
159 pVM->cpum.s.CTXSUFF(pHyperCore)->ss = SelSS;
160}
161
162CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
163{
164 pVM->cpum.s.CTXSUFF(pHyperCore)->esp = u32ESP;
165}
166
167CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
168{
169 pVM->cpum.s.CTXSUFF(pHyperCore)->eflags.u32 = Efl;
170 return VINF_SUCCESS;
171}
172
173CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
174{
175 pVM->cpum.s.CTXSUFF(pHyperCore)->eip = u32EIP;
176}
177
178CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
179{
180 pVM->cpum.s.Hyper.tr = SelTR;
181}
182
183CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
184{
185 pVM->cpum.s.Hyper.ldtr = SelLDTR;
186}
187
188CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
189{
190 pVM->cpum.s.Hyper.dr0 = uDr0;
191 /** @todo in GC we must load it! */
192}
193
194CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
195{
196 pVM->cpum.s.Hyper.dr1 = uDr1;
197 /** @todo in GC we must load it! */
198}
199
200CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
201{
202 pVM->cpum.s.Hyper.dr2 = uDr2;
203 /** @todo in GC we must load it! */
204}
205
206CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
207{
208 pVM->cpum.s.Hyper.dr3 = uDr3;
209 /** @todo in GC we must load it! */
210}
211
212CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
213{
214 pVM->cpum.s.Hyper.dr6 = uDr6;
215 /** @todo in GC we must load it! */
216}
217
218CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
219{
220 pVM->cpum.s.Hyper.dr7 = uDr7;
221 /** @todo in GC we must load it! */
222}
223
224
225CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
226{
227 return pVM->cpum.s.CTXSUFF(pHyperCore)->cs;
228}
229
230CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
231{
232 return pVM->cpum.s.CTXSUFF(pHyperCore)->ds;
233}
234
235CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
236{
237 return pVM->cpum.s.CTXSUFF(pHyperCore)->es;
238}
239
240CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
241{
242 return pVM->cpum.s.CTXSUFF(pHyperCore)->fs;
243}
244
245CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
246{
247 return pVM->cpum.s.CTXSUFF(pHyperCore)->gs;
248}
249
250CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
251{
252 return pVM->cpum.s.CTXSUFF(pHyperCore)->ss;
253}
254
255#if 0 /* these are not correct. */
256
257CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
258{
259 return pVM->cpum.s.Hyper.cr0;
260}
261
262CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
263{
264 return pVM->cpum.s.Hyper.cr2;
265}
266
267CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
268{
269 return pVM->cpum.s.Hyper.cr3;
270}
271
272CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
273{
274 return pVM->cpum.s.Hyper.cr4;
275}
276
277#endif /* not correct */
278
279CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
280{
281 return pVM->cpum.s.CTXSUFF(pHyperCore)->eax;
282}
283
284CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
285{
286 return pVM->cpum.s.CTXSUFF(pHyperCore)->ebx;
287}
288
289CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
290{
291 return pVM->cpum.s.CTXSUFF(pHyperCore)->ecx;
292}
293
294CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
295{
296 return pVM->cpum.s.CTXSUFF(pHyperCore)->edx;
297}
298
299CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
300{
301 return pVM->cpum.s.CTXSUFF(pHyperCore)->esi;
302}
303
304CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
305{
306 return pVM->cpum.s.CTXSUFF(pHyperCore)->edi;
307}
308
309CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
310{
311 return pVM->cpum.s.CTXSUFF(pHyperCore)->ebp;
312}
313
314CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
315{
316 return pVM->cpum.s.CTXSUFF(pHyperCore)->esp;
317}
318
319CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
320{
321 return pVM->cpum.s.CTXSUFF(pHyperCore)->eflags.u32;
322}
323
324CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
325{
326 return pVM->cpum.s.CTXSUFF(pHyperCore)->eip;
327}
328
329CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
330{
331 if (pcbLimit)
332 *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
333 return pVM->cpum.s.Hyper.idtr.pIdt;
334}
335
336CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
337{
338 if (pcbLimit)
339 *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
340 return pVM->cpum.s.Hyper.gdtr.pGdt;
341}
342
343CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
344{
345 return pVM->cpum.s.Hyper.ldtr;
346}
347
348CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
349{
350 return pVM->cpum.s.Hyper.dr0;
351}
352
353CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
354{
355 return pVM->cpum.s.Hyper.dr1;
356}
357
358CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
359{
360 return pVM->cpum.s.Hyper.dr2;
361}
362
363CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
364{
365 return pVM->cpum.s.Hyper.dr3;
366}
367
368CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
369{
370 return pVM->cpum.s.Hyper.dr6;
371}
372
373CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
374{
375 return pVM->cpum.s.Hyper.dr7;
376}
377
378
379/**
380 * Gets the pointer to the internal CPUMCTXCORE structure.
381 * This is only for reading in order to save a few calls.
382 *
383 * @param pVM Handle to the virtual machine.
384 */
385CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
386{
387 return CPUMCTX2CORE(&pVM->cpum.s.Guest);
388}
389
390
391/**
392 * Sets the guest context core registers.
393 *
394 * @param pVM Handle to the virtual machine.
395 * @param pCtxCore The new context core values.
396 */
397CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
398{
399 /** @todo #1410 requires selectors to be checked. */
400
401 PCPUMCTXCORE pCtxCoreDst CPUMCTX2CORE(&pVM->cpum.s.Guest);
402 *pCtxCoreDst = *pCtxCore;
403}
404
405
406/**
407 * Queries the pointer to the internal CPUMCTX structure
408 *
409 * @returns VBox status code.
410 * @param pVM Handle to the virtual machine.
411 * @param ppCtx Receives the CPUMCTX pointer when successful.
412 */
413CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
414{
415 *ppCtx = &pVM->cpum.s.Guest;
416 return VINF_SUCCESS;
417}
418
419
420CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
421{
422 pVM->cpum.s.Guest.gdtr.cbGdt = limit;
423 pVM->cpum.s.Guest.gdtr.pGdt = addr;
424 pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
425 return VINF_SUCCESS;
426}
427
428CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
429{
430 pVM->cpum.s.Guest.idtr.cbIdt = limit;
431 pVM->cpum.s.Guest.idtr.pIdt = addr;
432 pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
433 return VINF_SUCCESS;
434}
435
436CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
437{
438 pVM->cpum.s.Guest.tr = tr;
439 pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
440 return VINF_SUCCESS;
441}
442
443CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
444{
445 pVM->cpum.s.Guest.ldtr = ldtr;
446 pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
447 return VINF_SUCCESS;
448}
449
450
451CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint32_t cr0)
452{
453 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
454 != (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
455 pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
456 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;
457 pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
458 return VINF_SUCCESS;
459}
460
461CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint32_t cr2)
462{
463 pVM->cpum.s.Guest.cr2 = cr2;
464 return VINF_SUCCESS;
465}
466
467CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint32_t cr3)
468{
469 pVM->cpum.s.Guest.cr3 = cr3;
470 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
471 return VINF_SUCCESS;
472}
473
474CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint32_t cr4)
475{
476 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
477 != (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
478 pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
479 pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
480 if (!CPUMSupportsFXSR(pVM))
481 cr4 &= ~X86_CR4_OSFSXR;
482 pVM->cpum.s.Guest.cr4 = cr4;
483 return VINF_SUCCESS;
484}
485
486CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
487{
488 pVM->cpum.s.Guest.eflags.u32 = eflags;
489 return VINF_SUCCESS;
490}
491
492CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
493{
494 pVM->cpum.s.Guest.eip = eip;
495 return VINF_SUCCESS;
496}
497
498CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
499{
500 pVM->cpum.s.Guest.eax = eax;
501 return VINF_SUCCESS;
502}
503
504CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
505{
506 pVM->cpum.s.Guest.ebx = ebx;
507 return VINF_SUCCESS;
508}
509
510CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
511{
512 pVM->cpum.s.Guest.ecx = ecx;
513 return VINF_SUCCESS;
514}
515
516CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
517{
518 pVM->cpum.s.Guest.edx = edx;
519 return VINF_SUCCESS;
520}
521
522CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
523{
524 pVM->cpum.s.Guest.esp = esp;
525 return VINF_SUCCESS;
526}
527
528CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
529{
530 pVM->cpum.s.Guest.ebp = ebp;
531 return VINF_SUCCESS;
532}
533
534CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
535{
536 pVM->cpum.s.Guest.esi = esi;
537 return VINF_SUCCESS;
538}
539
540CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
541{
542 pVM->cpum.s.Guest.edi = edi;
543 return VINF_SUCCESS;
544}
545
546CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
547{
548 pVM->cpum.s.Guest.ss = ss;
549 return VINF_SUCCESS;
550}
551
552CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
553{
554 pVM->cpum.s.Guest.cs = cs;
555 return VINF_SUCCESS;
556}
557
558CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
559{
560 pVM->cpum.s.Guest.ds = ds;
561 return VINF_SUCCESS;
562}
563
564CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
565{
566 pVM->cpum.s.Guest.es = es;
567 return VINF_SUCCESS;
568}
569
570CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
571{
572 pVM->cpum.s.Guest.fs = fs;
573 return VINF_SUCCESS;
574}
575
576CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
577{
578 pVM->cpum.s.Guest.gs = gs;
579 return VINF_SUCCESS;
580}
581
582
583CPUMDECL(uint32_t) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
584{
585 if (pcbLimit)
586 *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
587 return pVM->cpum.s.Guest.idtr.pIdt;
588}
589
590CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
591{
592 return pVM->cpum.s.Guest.tr;
593}
594
595CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
596{
597 return pVM->cpum.s.Guest.cs;
598}
599
600CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
601{
602 return pVM->cpum.s.Guest.ds;
603}
604
605CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
606{
607 return pVM->cpum.s.Guest.es;
608}
609
610CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
611{
612 return pVM->cpum.s.Guest.fs;
613}
614
615CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
616{
617 return pVM->cpum.s.Guest.gs;
618}
619
620CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
621{
622 return pVM->cpum.s.Guest.ss;
623}
624
625CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
626{
627 return pVM->cpum.s.Guest.ldtr;
628}
629
630
631CPUMDECL(uint32_t) CPUMGetGuestCR0(PVM pVM)
632{
633 return pVM->cpum.s.Guest.cr0;
634}
635
636CPUMDECL(uint32_t) CPUMGetGuestCR2(PVM pVM)
637{
638 return pVM->cpum.s.Guest.cr2;
639}
640
641CPUMDECL(uint32_t) CPUMGetGuestCR3(PVM pVM)
642{
643 return pVM->cpum.s.Guest.cr3;
644}
645
646CPUMDECL(uint32_t) CPUMGetGuestCR4(PVM pVM)
647{
648 return pVM->cpum.s.Guest.cr4;
649}
650
651CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
652{
653 *pGDTR = pVM->cpum.s.Guest.gdtr;
654}
655
656CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
657{
658 return pVM->cpum.s.Guest.eip;
659}
660
661CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
662{
663 return pVM->cpum.s.Guest.eax;
664}
665
666CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
667{
668 return pVM->cpum.s.Guest.ebx;
669}
670
671CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
672{
673 return pVM->cpum.s.Guest.ecx;
674}
675
676CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
677{
678 return pVM->cpum.s.Guest.edx;
679}
680
681CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
682{
683 return pVM->cpum.s.Guest.esi;
684}
685
686CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
687{
688 return pVM->cpum.s.Guest.edi;
689}
690
691CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
692{
693 return pVM->cpum.s.Guest.esp;
694}
695
696CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
697{
698 return pVM->cpum.s.Guest.ebp;
699}
700
701CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
702{
703 return pVM->cpum.s.Guest.eflags.u32;
704}
705
706CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
707{
708 return &pVM->cpum.s.Guest.trHid;
709}
710
711//@todo: crx should be an array
712CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
713{
714 switch (iReg)
715 {
716 case USE_REG_CR0:
717 *pValue = pVM->cpum.s.Guest.cr0;
718 break;
719 case USE_REG_CR2:
720 *pValue = pVM->cpum.s.Guest.cr2;
721 break;
722 case USE_REG_CR3:
723 *pValue = pVM->cpum.s.Guest.cr3;
724 break;
725 case USE_REG_CR4:
726 *pValue = pVM->cpum.s.Guest.cr4;
727 break;
728 default:
729 return VERR_INVALID_PARAMETER;
730 }
731 return VINF_SUCCESS;
732}
733
734CPUMDECL(RTUINTREG) CPUMGetGuestDR0(PVM pVM)
735{
736 return pVM->cpum.s.Guest.dr0;
737}
738
739CPUMDECL(RTUINTREG) CPUMGetGuestDR1(PVM pVM)
740{
741 return pVM->cpum.s.Guest.dr1;
742}
743
744CPUMDECL(RTUINTREG) CPUMGetGuestDR2(PVM pVM)
745{
746 return pVM->cpum.s.Guest.dr2;
747}
748
749CPUMDECL(RTUINTREG) CPUMGetGuestDR3(PVM pVM)
750{
751 return pVM->cpum.s.Guest.dr3;
752}
753
754CPUMDECL(RTUINTREG) CPUMGetGuestDR6(PVM pVM)
755{
756 return pVM->cpum.s.Guest.dr6;
757}
758
759CPUMDECL(RTUINTREG) CPUMGetGuestDR7(PVM pVM)
760{
761 return pVM->cpum.s.Guest.dr7;
762}
763
764/** @todo drx should be an array */
765CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
766{
767 switch (iReg)
768 {
769 case USE_REG_DR0:
770 *pValue = pVM->cpum.s.Guest.dr0;
771 break;
772 case USE_REG_DR1:
773 *pValue = pVM->cpum.s.Guest.dr1;
774 break;
775 case USE_REG_DR2:
776 *pValue = pVM->cpum.s.Guest.dr2;
777 break;
778 case USE_REG_DR3:
779 *pValue = pVM->cpum.s.Guest.dr3;
780 break;
781 case USE_REG_DR4:
782 case USE_REG_DR6:
783 *pValue = pVM->cpum.s.Guest.dr6;
784 break;
785 case USE_REG_DR5:
786 case USE_REG_DR7:
787 *pValue = pVM->cpum.s.Guest.dr7;
788 break;
789
790 default:
791 return VERR_INVALID_PARAMETER;
792 }
793 return VINF_SUCCESS;
794}
795
796/**
797 * Gets a CpuId leaf.
798 *
799 * @param pVM The VM handle.
800 * @param iLeaf The CPUID leaf to get.
801 * @param pEax Where to store the EAX value.
802 * @param pEbx Where to store the EBX value.
803 * @param pEcx Where to store the ECX value.
804 * @param pEdx Where to store the EDX value.
805 */
806CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
807{
808 PCCPUMCPUID pCpuId;
809 if (iLeaf < ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
810 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
811 else if (iLeaf - UINT32_C(0x80000000) < ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
812 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
813 else
814 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
815
816 *pEax = pCpuId->eax;
817 *pEbx = pCpuId->ebx;
818 *pEcx = pCpuId->ecx;
819 *pEdx = pCpuId->edx;
820 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
821}
822
823/**
824 * Gets a pointer to the array of standard CPUID leafs.
825 *
826 * CPUMGetGuestCpuIdStdMax() give the size of the array.
827 *
828 * @returns Pointer to the standard CPUID leafs (read-only).
829 * @param pVM The VM handle.
830 * @remark Intended for PATM.
831 */
832CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
833{
834 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
835}
836
837/**
838 * Gets a pointer to the array of extended CPUID leafs.
839 *
840 * CPUMGetGuestCpuIdExtMax() give the size of the array.
841 *
842 * @returns Pointer to the extended CPUID leafs (read-only).
843 * @param pVM The VM handle.
844 * @remark Intended for PATM.
845 */
846CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
847{
848 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
849}
850
851/**
852 * Gets a pointer to the default CPUID leaf.
853 *
854 * @returns Pointer to the default CPUID leaf (read-only).
855 * @param pVM The VM handle.
856 * @remark Intended for PATM.
857 */
858CPUMDECL(GCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
859{
860 return GCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
861}
862
863/**
864 * Gets a number of standard CPUID leafs.
865 *
866 * @returns Number of leafs.
867 * @param pVM The VM handle.
868 * @remark Intended for PATM.
869 */
870CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
871{
872 return ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
873}
874
875/**
876 * Gets a number of extended CPUID leafs.
877 *
878 * @returns Number of leafs.
879 * @param pVM The VM handle.
880 * @remark Intended for PATM.
881 */
882CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
883{
884 return ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
885}
886
/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to set.
 */
CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            /* Standard leaf 1 is only patched when the CPU reports it
               (max standard leaf >= 1). */
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            /* Extended leaf 0x80000001 is only patched when present AND its
               EDX is non-zero — an all-zero EDX is deliberately left alone. */
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            Log(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
}
914
915/**
916 * Clears a CPUID feature bit.
917 *
918 * @param pVM The VM Handle.
919 * @param enmFeature The feature to clear.
920 */
921CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
922{
923 switch (enmFeature)
924 {
925 /*
926 * Set the APIC bit in both feature masks.
927 */
928 case CPUMCPUIDFEATURE_APIC:
929 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
930 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
931 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
932 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
933 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
934 break;
935
936 default:
937 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
938 break;
939 }
940}
941
942
943
944CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, RTGCUINTREG uDr0)
945{
946 pVM->cpum.s.Guest.dr0 = uDr0;
947 return CPUMRecalcHyperDRx(pVM);
948}
949
950CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, RTGCUINTREG uDr1)
951{
952 pVM->cpum.s.Guest.dr1 = uDr1;
953 return CPUMRecalcHyperDRx(pVM);
954}
955
956CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, RTGCUINTREG uDr2)
957{
958 pVM->cpum.s.Guest.dr2 = uDr2;
959 return CPUMRecalcHyperDRx(pVM);
960}
961
962CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, RTGCUINTREG uDr3)
963{
964 pVM->cpum.s.Guest.dr3 = uDr3;
965 return CPUMRecalcHyperDRx(pVM);
966}
967
968CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, RTGCUINTREG uDr6)
969{
970 pVM->cpum.s.Guest.dr6 = uDr6;
971 return CPUMRecalcHyperDRx(pVM);
972}
973
974CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, RTGCUINTREG uDr7)
975{
976 pVM->cpum.s.Guest.dr7 = uDr7;
977 return CPUMRecalcHyperDRx(pVM);
978}
979
980/** @todo drx should be an array */
981CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint32_t Value)
982{
983 switch (iReg)
984 {
985 case USE_REG_DR0:
986 pVM->cpum.s.Guest.dr0 = Value;
987 break;
988 case USE_REG_DR1:
989 pVM->cpum.s.Guest.dr1 = Value;
990 break;
991 case USE_REG_DR2:
992 pVM->cpum.s.Guest.dr2 = Value;
993 break;
994 case USE_REG_DR3:
995 pVM->cpum.s.Guest.dr3 = Value;
996 break;
997 case USE_REG_DR4:
998 case USE_REG_DR6:
999 pVM->cpum.s.Guest.dr6 = Value;
1000 break;
1001 case USE_REG_DR5:
1002 case USE_REG_DR7:
1003 pVM->cpum.s.Guest.dr7 = Value;
1004 break;
1005
1006 default:
1007 return VERR_INVALID_PARAMETER;
1008 }
1009 return CPUMRecalcHyperDRx(pVM);
1010}
1011
1012
/**
 * Recalculates the hypervisor DRx register values based on
 * current guest registers and DBGF breakpoints.
 *
 * This is called whenever a guest DRx register is modified and when DBGF
 * sets a hardware breakpoint. In guest context this function will reload
 * any (hyper) DRx registers which comes out with a different value.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
{
    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags. The GE and LE flags are always
     * set and we don't care if the guest doesn't set them. GD is virtualized
     * when we dispatch #DB, we never enable it.
     */
    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
#ifdef CPUM_VIRTUALIZE_DRX
    const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
#else
    /* Guest DRx virtualization is compiled out: only DBGF breakpoints count. */
    const RTGCUINTREG uGstDr7 = 0;
#endif
    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
    {
        /*
         * Ok, something is enabled. Recalc each of the breakpoints.
         * Straight forward code, not optimized/minimized in any way.
         *
         * For each of the four breakpoints: a DBGF breakpoint takes
         * precedence over the guest's, and if neither enables the slot the
         * current hyper value is kept unchanged.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = CPUMGetGuestDR0(pVM);
        }
        else
            uNewDr0 = pVM->cpum.s.Hyper.dr0;

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = CPUMGetGuestDR1(pVM);
        }
        else
            uNewDr1 = pVM->cpum.s.Hyper.dr1;

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = CPUMGetGuestDR2(pVM);
        }
        else
            uNewDr2 = pVM->cpum.s.Hyper.dr2;

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = CPUMGetGuestDR3(pVM);
        }
        else
            uNewDr3 = pVM->cpum.s.Hyper.dr3;

        /*
         * Apply the updates.
         * Note that DR7 is written last — presumably so the enable bits only
         * become active once the address registers hold the new values.
         */
#ifdef IN_GC
        if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
        {
            /** @todo save host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
        if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
            CPUMSetHyperDR3(pVM, uNewDr3);
        if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
            CPUMSetHyperDR2(pVM, uNewDr2);
        if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
            CPUMSetHyperDR1(pVM, uNewDr1);
        if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
            CPUMSetHyperDR0(pVM, uNewDr0);
        if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
            CPUMSetHyperDR7(pVM, uNewDr7);
    }
    else
    {
        /* Nothing enabled anywhere: drop the debug-register usage flag. */
#ifdef IN_GC
        if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
        {
            /** @todo restore host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
          pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
          pVM->cpum.s.Hyper.dr7));

    return VINF_SUCCESS;
}
1145
1146#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1147
/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change the cs and ss selector registers with RPL=0
 * to RPL=1, so the guest runs in ring-1 under the hypervisor.
 *
 * @returns VBox status. (recompiler failure)
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage). NULL means the
 *                      guest context embedded in the VM structure.
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    Assert(!pVM->cpum.s.fRawEntered);
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);

    /*
     * Are we in Ring-0?
     * (ss must be non-null: a null ss selector is not treated as ring-0.)
     */
    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Enter execution mode.
         */
        PATMRawEnter(pVM, pCtxCore);

        /*
         * Set CPL to Ring-1.
         * cs is only bumped when it is non-null and currently RPL=0.
         */
        pCtxCore->ss |= 1;
        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
            pCtxCore->cs |= 1;
    }
    else
    {
        /* Ring-1 guest code is not supported; only ring-0 (handled above),
           ring-2/3, or V86 mode may reach this path. */
        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 && !pCtxCore->eflags.Bits.u1VM,
                  ("ring-1 code not supported\n"));
        /*
         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
         */
        PATMRawEnter(pVM, pCtxCore);
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
                     || pCtxCore->eflags.Bits.u1VM,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
    Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */

    pVM->cpum.s.fRawEntered = true;
    return VINF_SUCCESS;
}
1205
1206
/**
 * Transforms the guest CPU state from raw-ring mode to correct values.
 *
 * This function will change any selector registers with RPL=1 back to RPL=0,
 * undoing the transformation done by CPUMRawEnter().
 *
 * @returns Adjusted rc.
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage). NULL means the
 *                      guest context embedded in the VM structure.
 * @param   rc          Raw mode return code
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
{
    /*
     * Don't leave if we've already left (in GC).
     */
    Assert(pVM->cpum.s.fRawEntered);
    if (!pVM->cpum.s.fRawEntered)
        return rc;
    pVM->cpum.s.fRawEntered = false;

    PCPUMCTX pCtx = &pVM->cpum.s.Guest;
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(pCtx);
    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
    AssertMsg(pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));

    /*
     * Are we executing in raw ring-1?
     */
    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Leave execution mode.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
        /** @todo See what happens if we remove this. */
        /* NB: the ds/es/fs/gs cleanup below is duplicated in the else branch. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;

        /*
         * Ring-1 selector => Ring-0.
         */
        pCtxCore->ss &= ~X86_SEL_RPL;
        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
            pCtxCore->cs &= ~X86_SEL_RPL;
    }
    else
    {
        /*
         * PATM is taking care of the IOPL and IF flags for us.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /** @todo See what happens if we remove this. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;
    }

    return rc;
}
1282
1283/**
1284 * Updates the EFLAGS while we're in raw-mode.
1285 *
1286 * @param pVM The VM handle.
1287 * @param pCtxCore The context core.
1288 * @param eflags The new EFLAGS value.
1289 */
1290CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1291{
1292 if (!pVM->cpum.s.fRawEntered)
1293 {
1294 pCtxCore->eflags.u32 = eflags;
1295 return;
1296 }
1297 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1298}
1299
1300#endif /* !IN_RING0 */
1301
1302/**
1303 * Gets the EFLAGS while we're in raw-mode.
1304 *
1305 * @returns The eflags.
1306 * @param pVM The VM handle.
1307 * @param pCtxCore The context core.
1308 */
1309CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1310{
1311#ifdef IN_RING0
1312 return pCtxCore->eflags.u32;
1313#else
1314 if (!pVM->cpum.s.fRawEntered)
1315 return pCtxCore->eflags.u32;
1316 return PATMRawGetEFlags(pVM, pCtxCore);
1317#endif
1318}
1319
1320
1321
1322
1323/**
1324 * Gets and resets the changed flags (CPUM_CHANGED_*).
1325 * Only REM should call this function.
1326 *
1327 * @returns The changed flags.
1328 * @param pVM The VM handle.
1329 */
1330CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
1331{
1332 unsigned fFlags = pVM->cpum.s.fChanged;
1333 pVM->cpum.s.fChanged = 0;
1334 /** @todo change the switcher to use the fChanged flags. */
1335 if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
1336 {
1337 fFlags |= CPUM_CHANGED_FPU_REM;
1338 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
1339 }
1340 return fFlags;
1341}
1342
1343/**
1344 * Sets the specified changed flags (CPUM_CHANGED_*).
1345 *
1346 * @param pVM The VM handle.
1347 */
1348CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
1349{
1350 pVM->cpum.s.fChanged |= fChangedFlags;
1351}
1352
1353/**
1354 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1355 * @returns true if supported.
1356 * @returns false if not supported.
1357 * @param pVM The VM handle.
1358 */
1359CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1360{
1361 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1362}
1363
1364
1365/**
1366 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1367 * @returns true if used.
1368 * @returns false if not used.
1369 * @param pVM The VM handle.
1370 */
1371CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1372{
1373 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
1374}
1375
1376
1377/**
1378 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1379 * @returns true if used.
1380 * @returns false if not used.
1381 * @param pVM The VM handle.
1382 */
1383CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1384{
1385 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
1386}
1387
1388/**
1389 * Lazily sync in the FPU/XMM state
1390 *
1391 * @returns VBox status code.
1392 * @param pVM VM handle.
1393 */
1394CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
1395{
1396 return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
1397}
1398
1399/**
1400 * Restore host FPU/XMM state
1401 *
1402 * @returns VBox status code.
1403 * @param pVM VM handle.
1404 */
1405CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
1406{
1407 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
1408 return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
1409}
1410
1411/**
1412 * Checks if we activated the FPU/XMM state of the guest OS
1413 * @returns true if we did.
1414 * @returns false if not.
1415 * @param pVM The VM handle.
1416 */
1417CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
1418{
1419 return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
1420}
1421
1422/**
1423 * Deactivate the FPU/XMM state of the guest OS
1424 * @param pVM The VM handle.
1425 */
1426CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
1427{
1428 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
1429}
1430
1431/**
1432 * Checks if the hidden selector registers are valid
1433 * @returns true if they are.
1434 * @returns false if not.
1435 * @param pVM The VM handle.
1436 */
1437CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
1438{
1439 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
1440}
1441
1442/**
1443 * Checks if the hidden selector registers are valid
1444 * @param pVM The VM handle.
1445 * @param fValid Valid or not
1446 */
1447CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
1448{
1449 pVM->cpum.s.fValidHiddenSelRegs = fValid;
1450}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette