VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@42165

Last change on this file was r42165, checked in by vboxsync on 2012-07-16

CPUMIsGuestIn64BitCode/CPUMIsGuestIn64BitCodeEx changes together with some early lazily loading of hidden selectors (raw-mode).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 42.4 KB
1/* $Id: SELMAll.cpp 42165 2012-07-16 13:36:01Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/pgm.h>
27#include "SELMInternal.h"
28#include <VBox/vmm/vm.h>
29#include <VBox/err.h>
30#include <VBox/param.h>
31#include <iprt/assert.h>
32#include <VBox/log.h>
33#include <VBox/vmm/vmm.h>
34#include <iprt/x86.h>
35
36
37
38#ifndef IN_RING0
39
40/**
41 * Converts a GC selector based address to a flat address.
42 *
43 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
44 * for that.
45 *
46 * @returns Flat address.
47 * @param pVM Pointer to the VM.
48 * @param Sel Selector part.
49 * @param Addr Address part.
50 * @remarks Don't use when in long mode.
51 */
52VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
53{
54 Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
55
56 /** @todo check the limit. */
57 X86DESC Desc;
58 if (!(Sel & X86_SEL_LDT))
59 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
60 else
61 {
62 /** @todo handle LDT pages not present! */
63 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
64 Desc = paLDT[Sel >> X86_SEL_SHIFT];
65 }
66
67 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)) & 0xffffffff);
68}
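
/*
 * Editor's illustrative sketch (not part of the original file): the selector
 * arithmetic SELMToFlatBySel performs, with made-up values.  A selector like
 * 0x0023 has index 4 (Sel >> X86_SEL_SHIFT), TI=0 (GDT, X86_SEL_LDT clear) and
 * RPL=3; the descriptor base is added to the offset and truncated to 32 bits.
 */
#if 0 /* example only */
static RTGCPTR selmExampleFlatBySel(PVM pVM)
{
    RTSEL   const Sel  = 0x0023;    /* hypothetical guest data selector       */
    RTGCPTR const Addr = 0x1000;    /* hypothetical offset within the segment */
    /* With a descriptor base of 0x00400000 the result is 0x00401000.         */
    return SELMToFlatBySel(pVM, Sel, Addr);
}
#endif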
69#endif /* !IN_RING0 */
70
71
72/**
73 * Converts a GC selector based address to a flat address.
74 *
75 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
76 * for that.
77 *
78 * @returns Flat address.
79 * @param pVM Pointer to the VM.
80 * @param SelReg Selector register
81 * @param pCtxCore CPU context
82 * @param Addr Address part.
83 */
84VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
85{
86 PCPUMSELREGHID pHiddenSel;
87 RTSEL Sel;
88 int rc;
89 PVMCPU pVCpu = VMMGetCpu(pVM);
90
91 rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel); AssertRC(rc);
92
93 /*
94 * Deal with real & v86 mode first.
95 */
96 if ( pCtxCore->eflags.Bits.u1VM
97 || CPUMIsGuestInRealMode(pVCpu))
98 {
99 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
100 if (CPUMAreHiddenSelRegsValid(pVCpu))
101 uFlat += pHiddenSel->u64Base;
102 else
103 uFlat += ((RTGCUINTPTR)Sel << 4);
104 return (RTGCPTR)uFlat;
105 }
106
107#ifdef IN_RING0
108 Assert(CPUMAreHiddenSelRegsValid(pVCpu));
109#else
110 /** @todo when we're in 16-bit mode, we should truncate the address as well. */
111 if (!CPUMAreHiddenSelRegsValid(pVCpu))
112 return SELMToFlatBySel(pVM, Sel, Addr);
113#endif
114
115 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
116 if ( pCtxCore->cs.Attr.n.u1Long
117 && CPUMIsGuestInLongMode(pVCpu))
118 {
119 switch (SelReg)
120 {
121 case DISSELREG_FS:
122 case DISSELREG_GS:
123 return (RTGCPTR)(pHiddenSel->u64Base + Addr);
124
125 default:
126 return Addr; /* base 0 */
127 }
128 }
129
130 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
131 Assert(pHiddenSel->u64Base <= 0xffffffff);
132 return ((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
133}
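
/*
 * Editor's illustrative sketch (not part of the original file): flattening an
 * FS-relative address with SELMToFlat.  In real/V86 mode the result is the
 * hidden base (or Sel << 4) plus the 16-bit offset, e.g. FS=0xB800 with
 * Addr=0x0010 gives 0xB8010; in 64-bit mode only FS and GS contribute their
 * base, as handled above.
 */
#if 0 /* example only */
static RTGCPTR selmExampleFlatFs(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    RTGCPTR const offFs = 0x0010;   /* hypothetical offset into FS */
    return SELMToFlat(pVM, DISSELREG_FS, pCtxCore, offFs);
}
#endif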
134
135
136/**
137 * Converts a GC selector based address to a flat address.
138 *
139 * Some basic checking is done, but not all kinds yet.
140 *
141 * @returns VBox status
142 * @param pVCpu Pointer to the VMCPU.
143 * @param SelReg Selector register.
144 * @param pCtxCore CPU context.
145 * @param Addr Address part.
146 * @param fFlags SELMTOFLAT_FLAGS_*
147 * GDT entries are valid.
148 * @param ppvGC Where to store the GC flat address.
149 */
150VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCCPUMCTXCORE pCtxCore, RTGCPTR Addr, unsigned fFlags, PRTGCPTR ppvGC)
151{
152 /*
153 * Fetch the selector first.
154 */
155 PCPUMSELREGHID pHiddenSel;
156 RTSEL Sel;
157
158 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel);
159 AssertRC(rc);
160
161 /*
162 * Deal with real & v86 mode first.
163 */
164 if ( pCtxCore->eflags.Bits.u1VM
165 || CPUMIsGuestInRealMode(pVCpu))
166 {
167 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
168 if (ppvGC)
169 {
170 if ( pHiddenSel
171 && CPUMAreHiddenSelRegsValid(pVCpu))
172 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
173 else
174 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
175 }
176 return VINF_SUCCESS;
177 }
178
179
180 uint32_t u32Limit;
181 RTGCPTR pvFlat;
182 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
183
184 /** @todo when we're in 16-bit mode, we should truncate the address as well. */
185#ifndef IN_RC
186 if ( pHiddenSel
187 && CPUMAreHiddenSelRegsValid(pVCpu))
188 {
189 bool fCheckLimit = true;
190
191 u1Present = pHiddenSel->Attr.n.u1Present;
192 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
193 u1DescType = pHiddenSel->Attr.n.u1DescType;
194 u4Type = pHiddenSel->Attr.n.u4Type;
195 u32Limit = pHiddenSel->u32Limit;
196
197 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
198 if ( pCtxCore->cs.Attr.n.u1Long
199 && CPUMIsGuestInLongMode(pVCpu))
200 {
201 fCheckLimit = false;
202 switch (SelReg)
203 {
204 case DISSELREG_FS:
205 case DISSELREG_GS:
206 pvFlat = (pHiddenSel->u64Base + Addr);
207 break;
208
209 default:
210 pvFlat = Addr;
211 break;
212 }
213 }
214 else
215 {
216 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
217 Assert(pHiddenSel->u64Base <= 0xffffffff);
218 pvFlat = (RTGCPTR)((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
219 }
220
221 /*
222 * Check if present.
223 */
224 if (u1Present)
225 {
226 /*
227 * Type check.
228 */
229 switch (u4Type)
230 {
231
232 /** Read only selector type. */
233 case X86_SEL_TYPE_RO:
234 case X86_SEL_TYPE_RO_ACC:
235 case X86_SEL_TYPE_RW:
236 case X86_SEL_TYPE_RW_ACC:
237 case X86_SEL_TYPE_EO:
238 case X86_SEL_TYPE_EO_ACC:
239 case X86_SEL_TYPE_ER:
240 case X86_SEL_TYPE_ER_ACC:
241 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
242 {
243 /** @todo fix this mess */
244 }
245 /* check limit. */
246 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
247 return VERR_OUT_OF_SELECTOR_BOUNDS;
248 /* ok */
249 if (ppvGC)
250 *ppvGC = pvFlat;
251 return VINF_SUCCESS;
252
253 case X86_SEL_TYPE_EO_CONF:
254 case X86_SEL_TYPE_EO_CONF_ACC:
255 case X86_SEL_TYPE_ER_CONF:
256 case X86_SEL_TYPE_ER_CONF_ACC:
257 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
258 {
259 /** @todo fix this mess */
260 }
261 /* check limit. */
262 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
263 return VERR_OUT_OF_SELECTOR_BOUNDS;
264 /* ok */
265 if (ppvGC)
266 *ppvGC = pvFlat;
267 return VINF_SUCCESS;
268
269 case X86_SEL_TYPE_RO_DOWN:
270 case X86_SEL_TYPE_RO_DOWN_ACC:
271 case X86_SEL_TYPE_RW_DOWN:
272 case X86_SEL_TYPE_RW_DOWN_ACC:
273 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
274 {
275 /** @todo fix this mess */
276 }
277 /* check limit. */
278 if (fCheckLimit)
279 {
280 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
281 return VERR_OUT_OF_SELECTOR_BOUNDS;
282 if ((RTGCUINTPTR)Addr <= u32Limit)
283 return VERR_OUT_OF_SELECTOR_BOUNDS;
284 }
285 /* ok */
286 if (ppvGC)
287 *ppvGC = pvFlat;
288 return VINF_SUCCESS;
289
290 default:
291 return VERR_INVALID_SELECTOR;
292
293 }
294 }
295 }
296# ifndef IN_RING0
297 else
298# endif
299#endif /* !IN_RC */
300#ifndef IN_RING0
301 {
302 X86DESC Desc;
303
304 PVM pVM = pVCpu->CTX_SUFF(pVM);
305 if (!(Sel & X86_SEL_LDT))
306 {
307 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
308 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
309 return VERR_INVALID_SELECTOR;
310 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
311 }
312 else
313 {
314 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
315 return VERR_INVALID_SELECTOR;
316
317 /** @todo handle LDT page(s) not present! */
318 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
319 Desc = paLDT[Sel >> X86_SEL_SHIFT];
320 }
321
322 /* calc limit. */
323 u32Limit = X86DESC_LIMIT(Desc);
324 if (Desc.Gen.u1Granularity)
325 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
326
327 /* calc address assuming straight stuff. */
328 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
329
330 /* Cut the address to 32 bits. */
331 Assert(!CPUMIsGuestInLongMode(pVCpu));
332 pvFlat &= 0xffffffff;
333
334 u1Present = Desc.Gen.u1Present;
335 u1Granularity = Desc.Gen.u1Granularity;
336 u1DescType = Desc.Gen.u1DescType;
337 u4Type = Desc.Gen.u4Type;
338
339 /*
340 * Check if present.
341 */
342 if (u1Present)
343 {
344 /*
345 * Type check.
346 */
347# define BOTH(a, b) ((a << 16) | b)
348 switch (BOTH(u1DescType, u4Type))
349 {
350
351 /** Read only selector type. */
352 case BOTH(1,X86_SEL_TYPE_RO):
353 case BOTH(1,X86_SEL_TYPE_RO_ACC):
354 case BOTH(1,X86_SEL_TYPE_RW):
355 case BOTH(1,X86_SEL_TYPE_RW_ACC):
356 case BOTH(1,X86_SEL_TYPE_EO):
357 case BOTH(1,X86_SEL_TYPE_EO_ACC):
358 case BOTH(1,X86_SEL_TYPE_ER):
359 case BOTH(1,X86_SEL_TYPE_ER_ACC):
360 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
361 {
362 /** @todo fix this mess */
363 }
364 /* check limit. */
365 if ((RTGCUINTPTR)Addr > u32Limit)
366 return VERR_OUT_OF_SELECTOR_BOUNDS;
367 /* ok */
368 if (ppvGC)
369 *ppvGC = pvFlat;
370 return VINF_SUCCESS;
371
372 case BOTH(1,X86_SEL_TYPE_EO_CONF):
373 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
374 case BOTH(1,X86_SEL_TYPE_ER_CONF):
375 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
376 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
377 {
378 /** @todo fix this mess */
379 }
380 /* check limit. */
381 if ((RTGCUINTPTR)Addr > u32Limit)
382 return VERR_OUT_OF_SELECTOR_BOUNDS;
383 /* ok */
384 if (ppvGC)
385 *ppvGC = pvFlat;
386 return VINF_SUCCESS;
387
388 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
389 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
390 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
391 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
392 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
393 {
394 /** @todo fix this mess */
395 }
396 /* check limit. */
397 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
398 return VERR_OUT_OF_SELECTOR_BOUNDS;
399 if ((RTGCUINTPTR)Addr <= u32Limit)
400 return VERR_OUT_OF_SELECTOR_BOUNDS;
401
402 /* ok */
403 if (ppvGC)
404 *ppvGC = pvFlat;
405 return VINF_SUCCESS;
406
407 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
408 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
409 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
410 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
411 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
412 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
413 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
414 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
415 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
416 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
417 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
418 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
419 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
420 {
421 /** @todo fix this mess */
422 }
423 /* check limit. */
424 if ((RTGCUINTPTR)Addr > u32Limit)
425 return VERR_OUT_OF_SELECTOR_BOUNDS;
426 /* ok */
427 if (ppvGC)
428 *ppvGC = pvFlat;
429 return VINF_SUCCESS;
430
431 default:
432 return VERR_INVALID_SELECTOR;
433
434 }
435# undef BOTH
436 }
437 }
438#endif /* !IN_RING0 */
439 return VERR_SELECTOR_NOT_PRESENT;
440}
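
/*
 * Editor's illustrative sketch (not part of the original file): typical use of
 * SELMToFlatEx to flatten a GS-relative operand before touching guest memory.
 * SELMTOFLAT_FLAGS_NO_PL skips the (not yet implemented) privilege checks; the
 * status codes are the ones returned by the function above.
 */
#if 0 /* example only */
static int selmExampleFlattenGsOperand(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore, RTGCPTR offSeg)
{
    RTGCPTR GCPtrFlat;
    int rc = SELMToFlatEx(pVCpu, DISSELREG_GS, pCtxCore, offSeg,
                          SELMTOFLAT_FLAGS_NO_PL, &GCPtrFlat);
    if (RT_FAILURE(rc))
        return rc;  /* VERR_INVALID_SELECTOR, VERR_SELECTOR_NOT_PRESENT, VERR_OUT_OF_SELECTOR_BOUNDS */
    /* ... access guest memory at GCPtrFlat, e.g. with PGMPhysSimpleReadGCPtr ... */
    return VINF_SUCCESS;
}
#endif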
441
442
443#ifndef IN_RING0
444/**
445 * Converts a GC selector based address to a flat address.
446 *
447 * Some basic checking is done, but not all kinds yet.
448 *
449 * @returns VBox status
450 * @param pVCpu Pointer to the VMCPU.
451 * @param eflags Current eflags
452 * @param Sel Selector part.
453 * @param Addr Address part.
454 * @param pHiddenSel Hidden selector register (can be NULL)
455 * @param fFlags SELMTOFLAT_FLAGS_*
456 * GDT entries are valid.
457 * @param ppvGC Where to store the GC flat address.
458 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
459 * the selector. NULL is allowed.
460 * @remarks Don't use when in long mode.
461 */
462VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, PCCPUMSELREGHID pHiddenSel, uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
463{
464 Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! */
465
466 /*
467 * Deal with real & v86 mode first.
468 */
469 if ( eflags.Bits.u1VM
470 || CPUMIsGuestInRealMode(pVCpu))
471 {
472 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
473 if (ppvGC)
474 {
475 if ( pHiddenSel
476 && CPUMAreHiddenSelRegsValid(pVCpu))
477 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
478 else
479 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
480 }
481 if (pcb)
482 *pcb = 0x10000 - uFlat;
483 return VINF_SUCCESS;
484 }
485
486
487 uint32_t u32Limit;
488 RTGCPTR pvFlat;
489 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
490
491 /** @todo when we're in 16-bit mode, we should truncate the address as well. */
492 if ( pHiddenSel
493 && CPUMAreHiddenSelRegsValid(pVCpu))
494 {
495 u1Present = pHiddenSel->Attr.n.u1Present;
496 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
497 u1DescType = pHiddenSel->Attr.n.u1DescType;
498 u4Type = pHiddenSel->Attr.n.u4Type;
499
500 u32Limit = pHiddenSel->u32Limit;
501 pvFlat = (RTGCPTR)(pHiddenSel->u64Base + (RTGCUINTPTR)Addr);
502
503 if ( !pHiddenSel->Attr.n.u1Long
504 || !CPUMIsGuestInLongMode(pVCpu))
505 {
506 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
507 pvFlat &= 0xffffffff;
508 }
509 }
510 else
511 {
512 X86DESC Desc;
513
514 PVM pVM = pVCpu->CTX_SUFF(pVM);
515 if (!(Sel & X86_SEL_LDT))
516 {
517 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
518 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
519 return VERR_INVALID_SELECTOR;
520 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
521 }
522 else
523 {
524 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
525 return VERR_INVALID_SELECTOR;
526
527 /** @todo handle LDT page(s) not present! */
528 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
529 Desc = paLDT[Sel >> X86_SEL_SHIFT];
530 }
531
532 /* calc limit. */
533 u32Limit = X86DESC_LIMIT(Desc);
534 if (Desc.Gen.u1Granularity)
535 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
536
537 /* calc address assuming straight stuff. */
538 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
539
540 /* Cut the address to 32 bits. */
541 Assert(!CPUMIsGuestInLongMode(pVCpu));
542 pvFlat &= 0xffffffff;
543
544 u1Present = Desc.Gen.u1Present;
545 u1Granularity = Desc.Gen.u1Granularity;
546 u1DescType = Desc.Gen.u1DescType;
547 u4Type = Desc.Gen.u4Type;
548 }
549
550 /*
551 * Check if present.
552 */
553 if (u1Present)
554 {
555 /*
556 * Type check.
557 */
558#define BOTH(a, b) ((a << 16) | b)
559 switch (BOTH(u1DescType, u4Type))
560 {
561
562 /** Read only selector type. */
563 case BOTH(1,X86_SEL_TYPE_RO):
564 case BOTH(1,X86_SEL_TYPE_RO_ACC):
565 case BOTH(1,X86_SEL_TYPE_RW):
566 case BOTH(1,X86_SEL_TYPE_RW_ACC):
567 case BOTH(1,X86_SEL_TYPE_EO):
568 case BOTH(1,X86_SEL_TYPE_EO_ACC):
569 case BOTH(1,X86_SEL_TYPE_ER):
570 case BOTH(1,X86_SEL_TYPE_ER_ACC):
571 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
572 {
573 /** @todo fix this mess */
574 }
575 /* check limit. */
576 if ((RTGCUINTPTR)Addr > u32Limit)
577 return VERR_OUT_OF_SELECTOR_BOUNDS;
578 /* ok */
579 if (ppvGC)
580 *ppvGC = pvFlat;
581 if (pcb)
582 *pcb = u32Limit - (uint32_t)Addr + 1;
583 return VINF_SUCCESS;
584
585 case BOTH(1,X86_SEL_TYPE_EO_CONF):
586 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
587 case BOTH(1,X86_SEL_TYPE_ER_CONF):
588 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
589 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
590 {
591 /** @todo fix this mess */
592 }
593 /* check limit. */
594 if ((RTGCUINTPTR)Addr > u32Limit)
595 return VERR_OUT_OF_SELECTOR_BOUNDS;
596 /* ok */
597 if (ppvGC)
598 *ppvGC = pvFlat;
599 if (pcb)
600 *pcb = u32Limit - (uint32_t)Addr + 1;
601 return VINF_SUCCESS;
602
603 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
604 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
605 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
606 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
607 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
608 {
609 /** @todo fix this mess */
610 }
611 /* check limit. */
612 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
613 return VERR_OUT_OF_SELECTOR_BOUNDS;
614 if ((RTGCUINTPTR)Addr <= u32Limit)
615 return VERR_OUT_OF_SELECTOR_BOUNDS;
616
617 /* ok */
618 if (ppvGC)
619 *ppvGC = pvFlat;
620 if (pcb)
621 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
622 return VINF_SUCCESS;
623
624 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
625 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
626 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
627 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
628 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
629 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
630 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
631 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
632 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
633 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
634 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
635 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
636 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
637 {
638 /** @todo fix this mess */
639 }
640 /* check limit. */
641 if ((RTGCUINTPTR)Addr > u32Limit)
642 return VERR_OUT_OF_SELECTOR_BOUNDS;
643 /* ok */
644 if (ppvGC)
645 *ppvGC = pvFlat;
646 if (pcb)
647 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
648 return VINF_SUCCESS;
649
650 default:
651 return VERR_INVALID_SELECTOR;
652
653 }
654#undef BOTH
655 }
656 return VERR_SELECTOR_NOT_PRESENT;
657}
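
/*
 * Editor's illustrative sketch (not part of the original file): using the
 * optional pcb output of SELMToFlatBySelEx to clip an access so it never
 * crosses the selector limit.  eflags, Sel and pHiddenSel are assumed to come
 * from the guest context.
 */
#if 0 /* example only */
static int selmExampleBoundedAccess(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel,
                                    PCCPUMSELREGHID pHiddenSel, RTGCPTR offSeg, uint32_t cbWanted)
{
    RTGCPTR  GCPtrFlat;
    uint32_t cbMax;
    int rc = SELMToFlatBySelEx(pVCpu, eflags, Sel, offSeg, pHiddenSel,
                               SELMTOFLAT_FLAGS_NO_PL, &GCPtrFlat, &cbMax);
    if (RT_FAILURE(rc))
        return rc;
    uint32_t const cbSafe = RT_MIN(cbWanted, cbMax);    /* stay within the segment */
    /* ... access cbSafe bytes at GCPtrFlat ... */
    NOREF(cbSafe);
    return VINF_SUCCESS;
}
#endif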
658#endif /* !IN_RING0 */
659
660
661#ifdef VBOX_WITH_RAW_MODE
662/**
663 * CPUM helper that loads the hidden selector register from the descriptor table
664 * when executing with raw-mode.
665 *
666 * @remarks This is only used when in legacy protected mode!
667 *
668 * @param pVCpu Pointer to the current virtual CPU.
669 * @param pCtx The guest CPU context.
670 * @param pSReg The selector register.
671 *
672 * @todo Deal 100% correctly with stale selectors. What's more evil is
673 * invalid page table entries, which isn't impossible to imagine for
674 * LDT entries for instance, though unlikely. Currently, we turn a
675 * blind eye to these issues and return the old hidden registers,
676 * though we don't set the valid flag, so that we'll try loading them
677 * over and over again till we succeed loading something.
678 */
679VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
680{
681 Assert(pCtx->cr0 & X86_CR0_PE);
682 Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));
683
684 PVM pVM = pVCpu->CTX_SUFF(pVM);
685 Assert(pVM->cCpus == 1);
686
687 RTSEL const Sel = pSReg->Sel;
688
689 /*
690 * Calculate descriptor table entry address.
691 */
692 RTGCPTR GCPtrDesc;
693 if (!(Sel & X86_SEL_LDT))
694 {
695 if ((Sel & X86_SEL_MASK) >= pCtx->gdtr.cbGdt)
696 {
697 AssertFailed(); /** @todo count these. */
698 return;
699 }
700 GCPtrDesc = pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK);
701 /** @todo Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; for cases
702 * where we don't change it too much. */
703 }
704 else
705 {
706 if ((Sel & X86_SEL_MASK) >= pCtx->ldtr.u32Limit)
707 {
708 AssertFailed(); /** @todo count these. */
709 return;
710 }
711 GCPtrDesc = pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK);
712 }
713
714 /*
715 * Try read the entry.
716 */
717 X86DESC Desc;
718 int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
719 if (RT_FAILURE(rc))
720 {
721 //RT_ZERO(Desc);
722 //if (!(Sel & X86_SEL_LDT))
723 // Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
724 //if (!Desc.Gen.u1Present)
725 {
726 AssertFailed(); /** @todo count these. */
727 return;
728 }
729 }
730
731 /*
732 * Digest it and store the result.
733 */
734 if ( !Desc.Gen.u1Present
735 || !Desc.Gen.u1DescType)
736 {
737 AssertFailed(); /** @todo count these. */
738 return;
739 }
740
741 uint32_t u32Limit = X86DESC_LIMIT(Desc);
742 if (Desc.Gen.u1Granularity)
743 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
744 pSReg->u32Limit = u32Limit;
745
746 pSReg->u64Base = X86DESC_BASE(Desc);
747 pSReg->Attr.u = X86DESC_GET_HID_ATTR(Desc);
748 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
749 pSReg->ValidSel = Sel;
750}
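
/*
 * Editor's illustrative note (not part of the original file): the limit
 * expansion above mirrors what the CPU does on a segment register load.  For a
 * hypothetical descriptor with a raw limit of 0x0001F and G=1 the effective
 * limit becomes (0x0001F << PAGE_SHIFT) | PAGE_OFFSET_MASK = 0x0001FFFF,
 * i.e. 128 KB minus one.
 */
#if 0 /* example only */
    uint32_t u32ExampleLimit = 0x0001F;                                     /* raw 20-bit limit */
    u32ExampleLimit = (u32ExampleLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;   /* -> 0x0001FFFF    */
#endif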
751#endif /* VBOX_WITH_RAW_MODE */
752
753
754/**
755 * Validates and converts a GC selector based code address to a flat
756 * address when in real or v8086 mode.
757 *
758 * @returns VINF_SUCCESS.
759 * @param pVCpu Pointer to the VMCPU.
760 * @param SelCS Selector part.
761 * @param pHidCS The hidden CS register part. Optional.
762 * @param Addr Address part.
763 * @param ppvFlat Where to store the flat address.
764 */
765DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pHidCS, RTGCPTR Addr,
766 PRTGCPTR ppvFlat)
767{
768 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
769 if (!pHidCS || !CPUMAreHiddenSelRegsValid(pVCpu))
770 uFlat += ((RTGCUINTPTR)SelCS << 4);
771 else
772 uFlat += pHidCS->u64Base;
773 *ppvFlat = (RTGCPTR)uFlat;
774 return VINF_SUCCESS;
775}
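
/*
 * Editor's illustrative sketch (not part of the original file): without a valid
 * hidden CS the classic real-mode formula applies, e.g. CS:IP = F000:FFF0
 * flattens to (0xF000 << 4) + 0xFFF0 = 0xFFFF0, the reset vector.
 */
#if 0 /* example only */
static RTGCPTR selmExampleRealModeResetVector(PVMCPU pVCpu)
{
    RTGCPTR GCPtrFlat = 0;
    selmValidateAndConvertCSAddrRealMode(pVCpu, 0xf000 /* SelCS */, NULL /* pHidCS */,
                                         0xfff0 /* Addr */, &GCPtrFlat);
    return GCPtrFlat;                   /* 0xffff0 */
}
#endif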
776
777
778#ifndef IN_RING0
779/**
780 * Validates and converts a GC selector based code address to a flat
781 * address when in protected/long mode using the standard algorithm.
782 *
783 * @returns VBox status code.
784 * @param pVM Pointer to the VM.
785 * @param pVCpu Pointer to the VMCPU.
786 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
787 * A full selector can be passed, we'll only use the RPL part.
788 * @param SelCS Selector part.
789 * @param Addr Address part.
790 * @param ppvFlat Where to store the flat address.
791 * @param pcBits Where to store the segment bitness (16/32/64). Optional.
792 */
793DECLINLINE(int) selmValidateAndConvertCSAddrStd(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
794 PRTGCPTR ppvFlat, uint32_t *pcBits)
795{
796 NOREF(pVCpu);
797 /** @todo validate limit! */
798 X86DESC Desc;
799 if (!(SelCS & X86_SEL_LDT))
800 Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
801 else
802 {
803 /** @todo handle LDT page(s) not present! */
804 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
805 Desc = paLDT[SelCS >> X86_SEL_SHIFT];
806 }
807
808 /*
809 * Check if present.
810 */
811 if (Desc.Gen.u1Present)
812 {
813 /*
814 * Type check.
815 */
816 if ( Desc.Gen.u1DescType == 1
817 && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
818 {
819 /*
820 * Check level.
821 */
822 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
823 if ( !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
824 ? uLevel <= Desc.Gen.u2Dpl
825 : uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
826 )
827 {
828 /*
829 * Limit check.
830 */
831 uint32_t u32Limit = X86DESC_LIMIT(Desc);
832 if (Desc.Gen.u1Granularity)
833 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
834 if ((RTGCUINTPTR)Addr <= u32Limit)
835 {
836 *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
837 /* Cut the address to 32 bits. */
838 *ppvFlat &= 0xffffffff;
839
840 if (pcBits)
841 *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
842 return VINF_SUCCESS;
843 }
844 return VERR_OUT_OF_SELECTOR_BOUNDS;
845 }
846 return VERR_INVALID_RPL;
847 }
848 return VERR_NOT_CODE_SELECTOR;
849 }
850 return VERR_SELECTOR_NOT_PRESENT;
851}
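
/*
 * Editor's illustrative note (not part of the original file): the privilege
 * check above uses uLevel = max(CPL, RPL).  A non-conforming code segment
 * requires uLevel <= DPL, while a conforming one may be executed from equal
 * or less privileged code (DPL <= CPL), which is what the uLevel >= DPL
 * branch checks.  A tiny worked example with made-up values:
 */
#if 0 /* example only */
    unsigned const uCpl = 3, uDpl = 0;
    bool const fOkNonConforming = uCpl <= uDpl;   /* false: ring 3 cannot use a DPL-0 segment    */
    bool const fOkConforming    = uCpl >= uDpl;   /* true:  a conforming DPL-0 segment is usable */
#endif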
852#endif /* !IN_RING0 */
853
854
855/**
856 * Validates and converts a GC selector based code address to a flat
857 * address when in protected/long mode using the standard algorithm.
858 *
859 * @returns VBox status code.
860 * @param pVCpu Pointer to the VMCPU.
861 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
862 * A full selector can be passed, we'll only use the RPL part.
863 * @param SelCS Selector part.
864 * @param Addr Address part.
865 * @param ppvFlat Where to store the flat address.
866 */
867DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pHidCS,
868 RTGCPTR Addr, PRTGCPTR ppvFlat)
869{
870 /*
871 * Check if present.
872 */
873 if (pHidCS->Attr.n.u1Present)
874 {
875 /*
876 * Type check.
877 */
878 if ( pHidCS->Attr.n.u1DescType == 1
879 && (pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
880 {
881 /*
882 * Check level.
883 */
884 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
885 if ( !(pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
886 ? uLevel <= pHidCS->Attr.n.u2Dpl
887 : uLevel >= pHidCS->Attr.n.u2Dpl /* hope I got this right now... */
888 )
889 {
890 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
891 if ( pHidCS->Attr.n.u1Long
892 && CPUMIsGuestInLongMode(pVCpu))
893 {
894 *ppvFlat = Addr;
895 return VINF_SUCCESS;
896 }
897
898 /*
899 * Limit check. Note that the limit in the hidden register is the
900 * final value. The granularity bit was included in its calculation.
901 */
902 uint32_t u32Limit = pHidCS->u32Limit;
903 if ((RTGCUINTPTR)Addr <= u32Limit)
904 {
905 *ppvFlat = (RTGCPTR)( (RTGCUINTPTR)Addr + pHidCS->u64Base );
906 return VINF_SUCCESS;
907 }
908 return VERR_OUT_OF_SELECTOR_BOUNDS;
909 }
910 Log(("Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n", pHidCS->Attr.n.u4Type, uLevel, pHidCS->Attr.n.u2Dpl));
911 return VERR_INVALID_RPL;
912 }
913 return VERR_NOT_CODE_SELECTOR;
914 }
915 return VERR_SELECTOR_NOT_PRESENT;
916}
917
918
919#ifdef IN_RC
920/**
921 * Validates and converts a GC selector based code address to a flat address.
922 *
923 * This is like SELMValidateAndConvertCSAddr + SELMIsSelector32Bit but with
924 * invalid hidden CS data. It's customized for dealing efficiently with CS
925 * at GC trap time.
926 *
927 * @returns VBox status code.
928 * @param pVCpu Pointer to the VMCPU.
929 * @param eflags Current eflags
930 * @param SelCPL Current privilege level. Get this from SS - CS might be
931 * conforming! A full selector can be passed, we'll only
932 * use the RPL part.
933 * @param SelCS Selector part.
934 * @param Addr Address part.
935 * @param ppvFlat Where to store the flat address.
936 * @param pcBits Where to store the 64-bit/32-bit/16-bit indicator.
937 */
938VMMDECL(int) SELMValidateAndConvertCSAddrGCTrap(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
939{
940 if ( eflags.Bits.u1VM
941 || CPUMIsGuestInRealMode(pVCpu))
942 {
943 *pcBits = 16;
944 return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, NULL, Addr, ppvFlat);
945 }
946 Assert(!CPUMAreHiddenSelRegsValid(pVCpu));
947 return selmValidateAndConvertCSAddrStd(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, pcBits);
948}
949#endif /* IN_RC */
950
951
952/**
953 * Validates and converts a GC selector based code address to a flat address.
954 *
955 * @returns VBox status code.
956 * @param pVCpu Pointer to the VMCPU.
957 * @param eflags Current eflags
958 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
959 * A full selector can be passed, we'll only use the RPL part.
960 * @param SelCS Selector part.
961 * @param pHiddenSel The hidden CS selector register.
962 * @param Addr Address part.
963 * @param ppvFlat Where to store the flat address.
964 */
965VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pHiddenCSSel,
966 RTGCPTR Addr, PRTGCPTR ppvFlat)
967{
968 if ( eflags.Bits.u1VM
969 || CPUMIsGuestInRealMode(pVCpu))
970 return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pHiddenCSSel, Addr, ppvFlat);
971
972#ifdef IN_RING0
973 Assert(CPUMAreHiddenSelRegsValid(pVCpu));
974#else
975 /** @todo when we're in 16-bit mode, should we truncate the address as well? (like in selmValidateAndConvertCSAddrRealMode) */
976 if (!CPUMAreHiddenSelRegsValid(pVCpu) || !pHiddenCSSel)
977 return selmValidateAndConvertCSAddrStd(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);
978#endif
979 return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pHiddenCSSel, Addr, ppvFlat);
980}
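
/*
 * Editor's illustrative sketch (not part of the original file): validating the
 * guest's CS:EIP before reading or disassembling at it.  SS is passed as
 * SelCPL because CS may be conforming; only its RPL bits are used.
 */
#if 0 /* example only */
static int selmExampleValidateCsEip(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelSS, RTSEL SelCS,
                                    PCCPUMSELREGHID pHiddenCSSel, uint32_t uEip)
{
    RTGCPTR GCPtrInstr;
    int rc = SELMValidateAndConvertCSAddr(pVCpu, eflags, SelSS, SelCS, pHiddenCSSel,
                                          (RTGCPTR)uEip, &GCPtrInstr);
    if (RT_FAILURE(rc))
        return rc;      /* VERR_NOT_CODE_SELECTOR, VERR_INVALID_RPL, VERR_OUT_OF_SELECTOR_BOUNDS, ... */
    /* ... fetch the instruction bytes at GCPtrInstr ... */
    return VINF_SUCCESS;
}
#endif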
981
982
983#ifndef IN_RING0
984/**
985 * Return the cpu mode corresponding to the (CS) selector
986 *
987 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
988 * @param pVM Pointer to the VM.
989 * @param pVCpu Pointer to the VMCPU.
990 * @param Sel The selector.
991 */
992static DISCPUMODE selmGetCpuModeFromSelector(PVM pVM, PVMCPU pVCpu, RTSEL Sel)
993{
994 Assert(!CPUMAreHiddenSelRegsValid(pVCpu));
995
996 /** @todo validate limit! */
997 X86DESC Desc;
998 if (!(Sel & X86_SEL_LDT))
999 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
1000 else
1001 {
1002 /** @todo handle LDT page(s) not present! */
1003 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
1004 Desc = paLDT[Sel >> X86_SEL_SHIFT];
1005 }
1006 return (Desc.Gen.u1DefBig) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
1007}
1008#endif /* !IN_RING0 */
1009
1010
1011/**
1012 * Return the cpu mode corresponding to the (CS) selector
1013 *
1014 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
1015 * @param pVCpu Pointer to the VMCPU.
1016 * @param eflags Current eflags register
1017 * @param Sel The selector.
1018 * @param pHiddenSel The hidden selector register.
1019 */
1020VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, PCCPUMSELREGHID pHiddenSel)
1021{
1022#ifdef IN_RING0
1023 Assert(CPUMAreHiddenSelRegsValid(pVCpu));
1024 NOREF(eflags); NOREF(Sel);
1025#else /* !IN_RING0 */
1026 if (!CPUMAreHiddenSelRegsValid(pVCpu))
1027 {
1028 /*
1029 * Deal with real & v86 mode first.
1030 */
1031 if ( eflags.Bits.u1VM
1032 || CPUMIsGuestInRealMode(pVCpu))
1033 return DISCPUMODE_16BIT;
1034
1035 return selmGetCpuModeFromSelector(pVCpu->CTX_SUFF(pVM), pVCpu, Sel);
1036 }
1037#endif /* !IN_RING0 */
1038 if ( pHiddenSel->Attr.n.u1Long
1039 && CPUMIsGuestInLongMode(pVCpu))
1040 return DISCPUMODE_64BIT;
1041
1042 /* Else compatibility or 32 bits mode. */
1043 return pHiddenSel->Attr.n.u1DefBig ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
1044}
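
/*
 * Editor's illustrative sketch (not part of the original file): the mode
 * returned here is what a disassembler would use as its default operand and
 * address size when decoding guest code at CS:(E/R)IP.
 */
#if 0 /* example only */
static DISCPUMODE selmExampleDecodeMode(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCS,
                                        PCCPUMSELREGHID pHiddenCS)
{
    return SELMGetCpuModeFromSelector(pVCpu, eflags, SelCS, pHiddenCS); /* DISCPUMODE_16/32/64BIT */
}
#endif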
1045
1046
1047/**
1048 * Returns Hypervisor's Trap 08 (\#DF) selector.
1049 *
1050 * @returns Hypervisor's Trap 08 (\#DF) selector.
1051 * @param pVM Pointer to the VM.
1052 */
1053VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
1054{
1055 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1056}
1057
1058
1059/**
1060 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
1061 *
1062 * @param pVM Pointer to the VM.
1063 * @param u32EIP EIP of Trap 08 handler.
1064 */
1065VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
1066{
1067 pVM->selm.s.TssTrap08.eip = u32EIP;
1068}
1069
1070
1071/**
1072 * Sets ss:esp for ring1 in main Hypervisor's TSS.
1073 *
1074 * @param pVM Pointer to the VM.
1075 * @param ss Ring1 SS register value. Pass 0 if invalid.
1076 * @param esp Ring1 ESP register value.
1077 */
1078void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
1079{
1080 Assert((ss & 1) || esp == 0);
1081 pVM->selm.s.Tss.ss1 = ss;
1082 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
1083}
1084
1085
1086#ifndef IN_RING0
1087/**
1088 * Gets ss:esp for ring1 in main Hypervisor's TSS.
1089 *
1090 * Returns SS=0 if the ring-1 stack isn't valid.
1091 *
1092 * @returns VBox status code.
1093 * @param pVM Pointer to the VM.
1094 * @param pSS Ring1 SS register value.
1095 * @param pEsp Ring1 ESP register value.
1096 */
1097VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
1098{
1099 Assert(pVM->cCpus == 1);
1100 PVMCPU pVCpu = &pVM->aCpus[0];
1101
1102 if (pVM->selm.s.fSyncTSSRing0Stack)
1103 {
1104 RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
1105 int rc;
1106 VBOXTSS tss;
1107
1108 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);
1109
1110# ifdef IN_RC
1111 bool fTriedAlready = false;
1112
1113l_tryagain:
1114 PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
1115 rc = MMGCRamRead(pVM, &tss.ss0, &pTss->ss0, sizeof(tss.ss0));
1116 rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
1117# ifdef DEBUG
1118 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
1119# endif
1120
1121 if (RT_FAILURE(rc))
1122 {
1123 if (!fTriedAlready)
1124 {
1125 /* Shadow page might be out of sync. Sync and try again */
1126 /** @todo might cross page boundary */
1127 fTriedAlready = true;
1128 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
1129 if (rc != VINF_SUCCESS)
1130 return rc;
1131 goto l_tryagain;
1132 }
1133 AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1134 return rc;
1135 }
1136
1137# else /* !IN_RC */
1138 /* Reading too much. Could be cheaper than two separate calls though. */
1139 rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
1140 if (RT_FAILURE(rc))
1141 {
1142 AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1143 return rc;
1144 }
1145# endif /* !IN_RC */
1146
1147# ifdef LOG_ENABLED
1148 uint32_t ssr0 = pVM->selm.s.Tss.ss1;
1149 uint32_t espr0 = pVM->selm.s.Tss.esp1;
1150 ssr0 &= ~1;
1151
1152 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1153 Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1154
1155 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1156# endif
1157 /* Update our TSS structure for the guest's ring 1 stack */
1158 selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
1159 pVM->selm.s.fSyncTSSRing0Stack = false;
1160 }
1161
1162 *pSS = pVM->selm.s.Tss.ss1;
1163 *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;
1164
1165 return VINF_SUCCESS;
1166}
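
/*
 * Editor's illustrative sketch (not part of the original file): reading the
 * ring-1 stack that shadows the guest's ring-0 stack.  Note that ss1 is stored
 * with bit 0 set (RPL 1) by selmSetRing1Stack above, which is why the logging
 * code masks it off before comparing against the raw guest ss0.
 */
#if 0 /* example only */
static void selmExampleLogRing1Stack(PVM pVM)
{
    uint32_t  ss1  = 0;
    RTGCPTR32 esp1 = 0;
    if (RT_SUCCESS(SELMGetRing1Stack(pVM, &ss1, &esp1)))
        Log(("ring-1 stack: %04x:%08x\n", ss1, (uint32_t)esp1));
}
#endif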
1167#endif /* !IN_RING0 */
1168
1169
1170/**
1171 * Returns Guest TSS pointer
1172 *
1173 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
1174 * @param pVM Pointer to the VM.
1175 */
1176VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
1177{
1178 return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
1179}
1180
1181
1182#ifndef IN_RING0
1183
1184/**
1185 * Gets the hypervisor code selector (CS).
1186 * @returns CS selector.
1187 * @param pVM Pointer to the VM.
1188 */
1189VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
1190{
1191 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
1192}
1193
1194
1195/**
1196 * Gets the 64-bit mode hypervisor code selector (CS64).
1197 * @returns CS selector.
1198 * @param pVM Pointer to the VM.
1199 */
1200VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
1201{
1202 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
1203}
1204
1205
1206/**
1207 * Gets the hypervisor data selector (DS).
1208 * @returns DS selector.
1209 * @param pVM Pointer to the VM.
1210 */
1211VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
1212{
1213 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
1214}
1215
1216
1217/**
1218 * Gets the hypervisor TSS selector.
1219 * @returns TSS selector.
1220 * @param pVM Pointer to the VM.
1221 */
1222VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
1223{
1224 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
1225}
1226
1227
1228/**
1229 * Gets the hypervisor TSS Trap 8 selector.
1230 * @returns TSS Trap 8 selector.
1231 * @param pVM Pointer to the VM.
1232 */
1233VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
1234{
1235 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1236}
1237
1238/**
1239 * Gets the address for the hypervisor GDT.
1240 *
1241 * @returns The GDT address.
1242 * @param pVM Pointer to the VM.
1243 * @remark This is intended only for very special use, like in the world
1244 * switchers. Don't exploit this API!
1245 */
1246VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
1247{
1248 /*
1249 * Always convert this from the HC pointer since we can be
1250 * called before the first relocation and have to work correctly
1251 * without having dependencies on the relocation order.
1252 */
1253 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
1254}
1255
1256#endif /* !IN_RING0 */
1257
1258/**
1259 * Gets info about the current TSS.
1260 *
1261 * @returns VBox status code.
1262 * @retval VINF_SUCCESS if we've got a TSS loaded.
1263 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1264 *
1265 * @param pVM Pointer to the VM.
1266 * @param pVCpu Pointer to the VMCPU.
1267 * @param pGCPtrTss Where to store the TSS address.
1268 * @param pcbTss Where to store the TSS size limit.
1269 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1270 */
1271VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1272{
1273 NOREF(pVM);
1274
1275 /*
1276 * The TR hidden register is always valid.
1277 */
1278 CPUMSELREGHID trHid;
1279 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1280 if (!(tr & X86_SEL_MASK))
1281 return VERR_SELM_NO_TSS;
1282
1283 *pGCPtrTss = trHid.u64Base;
1284 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1285 if (pfCanHaveIOBitmap)
1286 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1287 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1288 return VINF_SUCCESS;
1289}
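
/*
 * Editor's illustrative sketch (not part of the original file): querying the
 * guest TSS before inspecting its I/O permission bitmap.  Only 386+ TSS types
 * can have one, which is what pfCanHaveIOBitmap reports.
 */
#if 0 /* example only */
static int selmExampleCheckIoBitmap(PVM pVM, PVMCPU pVCpu)
{
    RTGCUINTPTR GCPtrTss, cbTss;
    bool        fCanHaveIOBitmap;
    int rc = SELMGetTSSInfo(pVM, pVCpu, &GCPtrTss, &cbTss, &fCanHaveIOBitmap);
    if (RT_FAILURE(rc))
        return rc;                      /* VERR_SELM_NO_TSS */
    if (!fCanHaveIOBitmap)
        return VINF_SUCCESS;            /* 286-style TSS: no I/O bitmap to look at */
    /* ... read offIoBitmap from the guest TSS at GCPtrTss ... */
    return VINF_SUCCESS;
}
#endif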
1290
1291
1292
1293/**
1294 * Notification callback which is called whenever there is a chance that a CR3
1295 * value might have changed.
1296 * This is called by PGM.
1297 *
1298 * @param pVM Pointer to the VM.
1299 * @param pVCpu Pointer to the VMCPU.
1300 */
1301VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
1302{
1303 /** @todo SMP support!! */
1304 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
1305 pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
1306}