VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 42202

Last change on this file since 42202 was 42186, checked in by vboxsync, 13 years ago

SELM,DIS,CPUM,EM: Hidden selector register cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 33.3 KB
Line 
1/* $Id: SELMAll.cpp 42186 2012-07-17 13:32:15Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/hwaccm.h>
28#include "SELMInternal.h"
29#include <VBox/vmm/vm.h>
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <iprt/assert.h>
33#include <VBox/log.h>
34#include <VBox/vmm/vmm.h>
35#include <iprt/x86.h>
36
37
38
39#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address (truncated to 32 bits).
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.  Reads the *shadow* GDT/LDT, so this
 *          is only meaningful in raw-mode with a single VCPU.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */

    /** @todo check the limit. */
    X86DESC    Desc;
    if (!(Sel & X86_SEL_LDT))
        /* GDT selector: index directly into the shadow GDT. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT pages not present! */
        /* LDT selector: the shadow LDT lives at an offset into the hypervisor LDT mapping. */
        PX86DESC    paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* Flat address = segment base + offset, wrapped at 4GB (we're not in long mode). */
    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)) & 0xffffffff);
}
69#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
70
71
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM         Pointer to the VM.
 * @param   SelReg      Selector register (DISSELREG_*) identifying which
 *                      segment register in @a pCtxCore to use.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part (segment offset).
 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG    pSReg;
    PVMCPU         pVCpu = VMMGetCpu(pVM);

    /* Resolve the selector register enum to the actual CPUMSELREG in the context. */
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* Real/v86 mode: 16-bit offset; base comes from the hidden register when
           valid, otherwise from the classic selector << 4 rule. */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (CPUMAreHiddenSelRegsValid(pVCpu))
            uFlat += pSReg->u64Base;
        else
            uFlat += ((RTGCUINTPTR)pSReg->Sel << 4);
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    /* In raw-mode the hidden parts may be stale; reload them from the descriptor
       tables before use.  CS is needed too for the long mode check below. */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            /* FS and GS keep a non-zero base even in 64-bit mode. */
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr;    /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return ((pSReg->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
}
136
137
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success; *ppvGC holds the flat address if requested.
 * @retval  VERR_OUT_OF_SELECTOR_BOUNDS if @a Addr is outside the segment limit.
 * @retval  VERR_INVALID_SELECTOR if the descriptor type is unusable.
 * @retval  VERR_SELECTOR_NOT_PRESENT if the present bit is clear.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   SelReg      Selector register.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 * @param   fFlags      SELMTOFLAT_FLAGS_* (SELMTOFLAT_FLAGS_NO_PL skips the
 *                      privilege-level checks, which are currently unimplemented
 *                      anyway - see the todos below).
 * @param   ppvGC       Where to store the GC flat address. Optional.
 */
VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
{
    /*
     * Fetch the selector first.
     */
    PCPUMSELREG pSReg;
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
    AssertRCReturn(rc, rc); AssertPtr(pSReg);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* 16-bit offset; base from hidden register when valid, else Sel << 4. */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
                *ppvGC = pSReg->u64Base + uFlat;
            else
                *ppvGC = ((RTGCUINTPTR)pSReg->Sel << 4) + uFlat;
        }
        return VINF_SUCCESS;
    }


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Raw-mode: lazily refresh stale hidden parts (CS too, for the long mode test). */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    RTGCPTR pvFlat;
    bool    fCheckLimit = true;
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        /* Long mode performs no limit checking. */
        fCheckLimit = false;
        switch (SelReg)
        {
            case DISSELREG_FS:
            case DISSELREG_GS:
                pvFlat = pSReg->u64Base + Addr;
                break;

            default:
                pvFlat = Addr;
                break;
        }
    }
    else
    {
        /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
        Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
        pvFlat  = pSReg->u64Base + Addr;
        pvFlat &= UINT32_C(0xffffffff);
    }

    /*
     * Check type if present.
     */
    if (pSReg->Attr.n.u1Present)
    {
        switch (pSReg->Attr.n.u4Type)
        {
            /* Expand-up data and code selector types: valid offsets are [0, limit]. */
            case X86_SEL_TYPE_RO:
            case X86_SEL_TYPE_RO_ACC:
            case X86_SEL_TYPE_RW:
            case X86_SEL_TYPE_RW_ACC:
            case X86_SEL_TYPE_EO:
            case X86_SEL_TYPE_EO_ACC:
            case X86_SEL_TYPE_ER:
            case X86_SEL_TYPE_ER_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Conforming code selectors: same limit rule as above. */
            case X86_SEL_TYPE_EO_CONF:
            case X86_SEL_TYPE_EO_CONF_ACC:
            case X86_SEL_TYPE_ER_CONF:
            case X86_SEL_TYPE_ER_CONF_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Expand-down data selectors: valid offsets are (limit, upper bound]. */
            case X86_SEL_TYPE_RO_DOWN:
            case X86_SEL_TYPE_RO_DOWN_ACC:
            case X86_SEL_TYPE_RW_DOWN:
            case X86_SEL_TYPE_RW_DOWN_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit)
                {
                    /* NOTE(review): the upper bound here is keyed on the granularity
                       bit; architecturally it depends on the D/B bit - confirm. */
                    if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                    if (Addr <= pSReg->u32Limit)
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                }
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;

        }
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
289
290
291#ifdef VBOX_WITH_RAW_MODE_NOT_R0
292/**
293 * Converts a GC selector based address to a flat address.
294 *
295 * Some basic checking is done, but not all kinds yet.
296 *
297 * @returns VBox status
298 * @param pVCpu Pointer to the VMCPU.
299 * @param eflags Current eflags
300 * @param Sel Selector part.
301 * @param Addr Address part.
302 * @param fFlags SELMTOFLAT_FLAGS_*
303 * GDT entires are valid.
304 * @param ppvGC Where to store the GC flat address.
305 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
306 * the selector. NULL is allowed.
307 * @remarks Don't use when in long mode.
308 */
309VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
310 uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
311{
312 Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */
313
314 /*
315 * Deal with real & v86 mode first.
316 */
317 if ( eflags.Bits.u1VM
318 || CPUMIsGuestInRealMode(pVCpu))
319 {
320 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
321 if (ppvGC)
322 *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
323 if (pcb)
324 *pcb = 0x10000 - uFlat;
325 return VINF_SUCCESS;
326 }
327
328 /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
329 X86DESC Desc;
330 PVM pVM = pVCpu->CTX_SUFF(pVM);
331 if (!(Sel & X86_SEL_LDT))
332 {
333 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
334 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
335 return VERR_INVALID_SELECTOR;
336 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
337 }
338 else
339 {
340 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
341 return VERR_INVALID_SELECTOR;
342
343 /** @todo handle LDT page(s) not present! */
344 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
345 Desc = paLDT[Sel >> X86_SEL_SHIFT];
346 }
347
348 /* calc limit. */
349 uint32_t u32Limit = X86DESC_LIMIT(Desc);
350 if (Desc.Gen.u1Granularity)
351 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
352
353 /* calc address assuming straight stuff. */
354 RTGCPTR pvFlat = Addr + X86DESC_BASE(Desc);
355
356 /* Cut the address to 32 bits. */
357 Assert(!CPUMIsGuestInLongMode(pVCpu));
358 pvFlat &= 0xffffffff;
359
360 uint8_t u1Present = Desc.Gen.u1Present;
361 uint8_t u1Granularity = Desc.Gen.u1Granularity;
362 uint8_t u1DescType = Desc.Gen.u1DescType;
363 uint8_t u4Type = Desc.Gen.u4Type;
364
365 /*
366 * Check if present.
367 */
368 if (u1Present)
369 {
370 /*
371 * Type check.
372 */
373#define BOTH(a, b) ((a << 16) | b)
374 switch (BOTH(u1DescType, u4Type))
375 {
376
377 /** Read only selector type. */
378 case BOTH(1,X86_SEL_TYPE_RO):
379 case BOTH(1,X86_SEL_TYPE_RO_ACC):
380 case BOTH(1,X86_SEL_TYPE_RW):
381 case BOTH(1,X86_SEL_TYPE_RW_ACC):
382 case BOTH(1,X86_SEL_TYPE_EO):
383 case BOTH(1,X86_SEL_TYPE_EO_ACC):
384 case BOTH(1,X86_SEL_TYPE_ER):
385 case BOTH(1,X86_SEL_TYPE_ER_ACC):
386 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
387 {
388 /** @todo fix this mess */
389 }
390 /* check limit. */
391 if ((RTGCUINTPTR)Addr > u32Limit)
392 return VERR_OUT_OF_SELECTOR_BOUNDS;
393 /* ok */
394 if (ppvGC)
395 *ppvGC = pvFlat;
396 if (pcb)
397 *pcb = u32Limit - (uint32_t)Addr + 1;
398 return VINF_SUCCESS;
399
400 case BOTH(1,X86_SEL_TYPE_EO_CONF):
401 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
402 case BOTH(1,X86_SEL_TYPE_ER_CONF):
403 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
404 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
405 {
406 /** @todo fix this mess */
407 }
408 /* check limit. */
409 if ((RTGCUINTPTR)Addr > u32Limit)
410 return VERR_OUT_OF_SELECTOR_BOUNDS;
411 /* ok */
412 if (ppvGC)
413 *ppvGC = pvFlat;
414 if (pcb)
415 *pcb = u32Limit - (uint32_t)Addr + 1;
416 return VINF_SUCCESS;
417
418 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
419 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
420 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
421 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
422 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
423 {
424 /** @todo fix this mess */
425 }
426 /* check limit. */
427 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
428 return VERR_OUT_OF_SELECTOR_BOUNDS;
429 if ((RTGCUINTPTR)Addr <= u32Limit)
430 return VERR_OUT_OF_SELECTOR_BOUNDS;
431
432 /* ok */
433 if (ppvGC)
434 *ppvGC = pvFlat;
435 if (pcb)
436 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
437 return VINF_SUCCESS;
438
439 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
440 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
441 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
442 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
443 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
444 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
445 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
446 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
447 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
448 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
449 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
450 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
451 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
452 {
453 /** @todo fix this mess */
454 }
455 /* check limit. */
456 if ((RTGCUINTPTR)Addr > u32Limit)
457 return VERR_OUT_OF_SELECTOR_BOUNDS;
458 /* ok */
459 if (ppvGC)
460 *ppvGC = pvFlat;
461 if (pcb)
462 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
463 return VINF_SUCCESS;
464
465 default:
466 return VERR_INVALID_SELECTOR;
467
468 }
469#undef BOTH
470 }
471 return VERR_SELECTOR_NOT_PRESENT;
472}
473#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
474
475
476#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * CPUM helper that loads the hidden selector register from the descriptor table
 * when executing with raw-mode.
 *
 * Reads the descriptor from the *guest* GDT/LDT (via PGM) and stores base,
 * limit and attributes into @a pSReg, marking it valid.  On any validation or
 * read failure the register is left untouched (valid flag stays clear) so a
 * later call will retry.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   Pointer to the current virtual CPU.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register to load.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);

    RTSEL const Sel = pSReg->Sel;

/** @todo Consider loading these from the shadow tables when possible? */
    /*
     * Calculate descriptor table entry address.
     */
    RTGCPTR GCPtrDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /* GDT selector: bounds check against the guest GDT limit. */
        if ((Sel & X86_SEL_MASK) >= pCtx->gdtr.cbGdt)
        {
            AssertFailed(); /** @todo count these. */
            return;
        }
        GCPtrDesc = pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK);
        /** @todo Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; for cases
         *        where we don't change it too much. */
    }
    else
    {
        /* LDT selector: bounds check against the guest LDT limit. */
        if ((Sel & X86_SEL_MASK) >= pCtx->ldtr.u32Limit)
        {
            AssertFailed(); /** @todo count these. */
            return;
        }
        GCPtrDesc = pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK);
    }

    /*
     * Try read the entry.
     */
    X86DESC Desc;
    int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
    if (RT_FAILURE(rc))
    {
        //RT_ZERO(Desc);
        //if (!(Sel & X86_SEL_LDT))
        //    Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        //if (!Desc.Gen.u1Present)
        {
            AssertFailed(); /** @todo count these. */
            return;
        }
    }

    /*
     * Digest it and store the result.  Only present, non-system (code/data)
     * descriptors are accepted here.
     */
    if (    !Desc.Gen.u1Present
        ||  !Desc.Gen.u1DescType)
    {
        AssertFailed(); /** @todo count these. */
        return;
    }

    /* Scale the limit to byte granularity when the G bit is set. */
    uint32_t u32Limit = X86DESC_LIMIT(Desc);
    if (Desc.Gen.u1Granularity)
        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
    pSReg->u32Limit = u32Limit;

    pSReg->u64Base  = X86DESC_BASE(Desc);
    pSReg->Attr.u   = X86DESC_GET_HID_ATTR(Desc);
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    pSReg->ValidSel = Sel;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
568
569
570/**
571 * Validates and converts a GC selector based code address to a flat
572 * address when in real or v8086 mode.
573 *
574 * @returns VINF_SUCCESS.
575 * @param pVCpu Pointer to the VMCPU.
576 * @param SelCS Selector part.
577 * @param pHidCS The hidden CS register part. Optional.
578 * @param Addr Address part.
579 * @param ppvFlat Where to store the flat address.
580 */
581DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
582 PRTGCPTR ppvFlat)
583{
584 RTGCUINTPTR uFlat = Addr & 0xffff;
585 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
586 uFlat += (RTGCUINTPTR)SelCS << 4;
587 else
588 uFlat += pSReg->u64Base;
589 *ppvFlat = uFlat;
590 return VINF_SUCCESS;
591}
592
593
594#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * Fetches the descriptor from the *shadow* GDT/LDT and performs presence,
 * type, privilege and limit checks before computing the flat address.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and *ppvFlat (+ *pcBits) on success.
 * @retval  VERR_OUT_OF_SELECTOR_BOUNDS, VERR_INVALID_RPL,
 *          VERR_NOT_CODE_SELECTOR or VERR_SELECTOR_NOT_PRESENT on failure.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   SelCPL      Current privilege level.  Get this from SS - CS might be
 *                      conforming!  A full selector can be passed, we'll only
 *                      use the RPL part.
 * @param   SelCS       Selector part.
 * @param   Addr        Address part.
 * @param   ppvFlat     Where to store the flat address.
 * @param   pcBits      Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        /* GDT selector: index into the shadow GDT. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        /* LDT selector: the shadow LDT is at an offset into the hypervisor mapping. */
        PX86DESC    paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code descriptor (S=1, code bit set).
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level.  Non-conforming: CPL/RPL must not exceed DPL.
             * Conforming: DPL must not exceed the caller's level.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
                    )
            {
                /*
                 * Limit check (limit scaled to 4KB units when G is set).
                 */
                uint32_t    u32Limit = X86DESC_LIMIT(Desc);
                if (Desc.Gen.u1Granularity)
                    u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
669#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
670
671
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the standard hidden selector registers.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and *ppvFlat on success.
 * @retval  VERR_OUT_OF_SELECTOR_BOUNDS, VERR_INVALID_RPL,
 *          VERR_NOT_CODE_SELECTOR or VERR_SELECTOR_NOT_PRESENT on failure.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   SelCPL      Current privilege level.  Get this from SS - CS might be
 *                      conforming!  A full selector can be passed, we'll only
 *                      use the RPL part.
 * @param   SelCS       Selector part.
 * @param   pSRegCS     The full CS selector register.
 * @param   Addr        The address (think IP/EIP/RIP).
 * @param   ppvFlat     Where to store the flat address upon successful return.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
                                                   RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    /*
     * Check if present.
     */
    if (pSRegCS->Attr.n.u1Present)
    {
        /*
         * Type check: must be a code descriptor (S=1, code bit set).
         */
        if (    pSRegCS->Attr.n.u1DescType == 1
            &&  (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level.  Non-conforming: CPL/RPL must not exceed DPL.
             * Conforming: DPL must not exceed the caller's level.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= pSRegCS->Attr.n.u2Dpl
                :   uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */
                    )
            {
                /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
                   (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
                if (    pSRegCS->Attr.n.u1Long
                    &&  CPUMIsGuestInLongMode(pVCpu))
                {
                    /* No limit checking in 64-bit mode either. */
                    *ppvFlat = Addr;
                    return VINF_SUCCESS;
                }

                /*
                 * Limit check. Note that the limit in the hidden register is the
                 * final value. The granularity bit was included in its calculation.
                 */
                uint32_t u32Limit = pSRegCS->u32Limit;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = Addr + pSRegCS->u64Base;
                    return VINF_SUCCESS;
                }

                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n",
                 pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl));
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
739
740
/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * Dispatches to the real/v86-mode, raw-mode (shadow table) or hidden-register
 * conversion helper as appropriate for the current CPU mode.
 *
 * @returns VBox status code (VINF_SUCCESS or one of the VERR_*_SELECTOR_* /
 *          VERR_INVALID_RPL statuses from the helpers).
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   Efl         Current EFLAGS.
 * @param   SelCPL      Current privilege level.  Get this from SS - CS might be
 *                      conforming!  A full selector can be passed, we'll only
 *                      use the RPL part.
 * @param   SelCS       Selector part.
 * @param   pSRegCS     The full CS selector register.  Optional in raw-mode
 *                      (NULL falls back to the shadow-table algorithm).
 * @param   Addr        The address (think IP/EIP/RIP).
 * @param   ppvFlat     Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if out of date. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression: raw-mode runs guest ring-0 code in ring 1, so
       strip the RPL bit again before the privilege checks (not needed when
       hardware virtualization is active). */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
783
784
785/**
786 * Returns Hypervisor's Trap 08 (\#DF) selector.
787 *
788 * @returns Hypervisor's Trap 08 (\#DF) selector.
789 * @param pVM Pointer to the VM.
790 */
791VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
792{
793 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
794}
795
796
/**
 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
 *
 * @param   pVM         Pointer to the VM.
 * @param   u32EIP      EIP of Trap 08 handler.
 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    pVM->selm.s.TssTrap08.eip = u32EIP;
}
807
808
/**
 * Sets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   ss      Ring1 SS register value. Pass 0 if invalid.
 * @param   esp     Ring1 ESP register value.
 */
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    /* Either a ring-1 selector (RPL bit set) or an explicitly invalid 0:0 pair. */
    Assert((ss & 1) || esp == 0);
    pVM->selm.s.Tss.ss1  = ss;
    pVM->selm.s.Tss.esp1 = (uint32_t)esp;
}
822
823
824#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.
 *
 * If the TSS is flagged for resync, the guest TSS's ring-0 stack fields are
 * re-read first and propagated into the hypervisor TSS (as the ring-1 stack).
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSS     Where to store the ring-1 SS register value.
 * @param   pEsp    Where to store the ring-1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        /* In raw-mode context read the fields directly, retrying once after a
           page sync if the mapping is stale. */
        bool    fTriedAlready = false;

l_tryagain:
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
#  ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
#  endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;     /* mask off the ring-1 RPL bit for comparison */

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
    }

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
905#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
906
907
908/**
909 * Returns Guest TSS pointer
910 *
911 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
912 * @param pVM Pointer to the VM.
913 */
914VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
915{
916 return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
917}
918
919#ifdef VBOX_WITH_RAW_MODE_NOT_R0
920
921/**
922 * Gets the hypervisor code selector (CS).
923 * @returns CS selector.
924 * @param pVM Pointer to the VM.
925 */
926VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
927{
928 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
929}
930
931
932/**
933 * Gets the 64-mode hypervisor code selector (CS64).
934 * @returns CS selector.
935 * @param pVM Pointer to the VM.
936 */
937VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
938{
939 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
940}
941
942
943/**
944 * Gets the hypervisor data selector (DS).
945 * @returns DS selector.
946 * @param pVM Pointer to the VM.
947 */
948VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
949{
950 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
951}
952
953
954/**
955 * Gets the hypervisor TSS selector.
956 * @returns TSS selector.
957 * @param pVM Pointer to the VM.
958 */
959VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
960{
961 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
962}
963
964
965/**
966 * Gets the hypervisor TSS Trap 8 selector.
967 * @returns TSS Trap 8 selector.
968 * @param pVM Pointer to the VM.
969 */
970VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
971{
972 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
973}
974
/**
 * Gets the address for the hypervisor GDT.
 *
 * @returns The GDT address (raw-mode context pointer).
 * @param   pVM     Pointer to the VM.
 * @remark  This is intended only for very special use, like in the world
 *          switchers. Don't exploit this API!
 */
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
    /*
     * Always convert this from the HC pointer since we can be
     * called before the first relocation and have to work correctly
     * without having dependencies on the relocation order.
     */
    return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}
992
993#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
994
/**
 * Gets info about the current TSS.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if we've got a TSS loaded.
 * @retval  VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   pGCPtrTss           Where to store the TSS address.
 * @param   pcbTss              Where to store the TSS size limit.
 * @param   pfCanHaveIOBitmap   Where to store the can-have-I/O-bitmap indicator. (optional)
 */
VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
{
    NOREF(pVM);

    /*
     * The TR hidden register is always valid.
     */
    CPUMSELREGHID trHid;
    RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
    if (!(tr & X86_SEL_MASK))
        return VERR_SELM_NO_TSS;

    *pGCPtrTss = trHid.u64Base;
    /* Size = limit + 1, except when limit is UINT32_MAX where the +1 would wrap. */
    *pcbTss    = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
    if (pfCanHaveIOBitmap)
        /* Only 32-bit TSS types can carry an I/O permission bitmap. */
        *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
                          || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
    return VINF_SUCCESS;
}
1027
1028
1029
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Updates the CR3 fields of the hypervisor TSSes so task switches into the
 * hypervisor use the current shadow/intermediate page tables.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /** @todo SMP support!! */
    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}
1044
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette