VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp @ 70948

Last change on this file since 70948 was 70948, checked in by vboxsync, 7 years ago

VMM: Added a bMainExecutionEngine member to the VM structure for use instead of fHMEnabled and fNEMEnabled. Changed a lot of HMIsEnabled invocations to use the new macros VM_IS_RAW_MODE_ENABLED and VM_IS_HM_OR_NEM_ENABLED. Eliminated fHMEnabledFixed. Fixed inverted test for raw-mode debug register sanity checking. Some other minor cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 39.8 KB
/* $Id: SELMAll.cpp 70948 2018-02-10 15:38:12Z vboxsync $ */
/** @file
 * SELM All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <VBox/vmm/vmm.h>
#include <iprt/x86.h>
#include <iprt/string.h>

#include "SELMInline.h"


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif


#ifndef IN_RING0

# ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
 * @callback_method_impl{FNPGMVIRTHANDLER}
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);

# ifdef IN_RING3
    RT_NOREF_PV(pVM);

    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    return VINF_PGM_HANDLER_DO_DEFAULT;

# else /* IN_RC: */
    /*
     * Execute the write, doing necessary pre and post shadow GDT checks.
     */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    uint32_t offGuestGdt = GCPtr - pCtx->gdtr.pGdt;
    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    memcpy(pvPtr, pvBuf, cbBuf);
    VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    else
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rcStrict;
# endif
}
# endif
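
/* Illustrative sketch (hypothetical helper, not VBox API): the pre/post write
 * checks above receive the byte offset of the write within the guest GDT.
 * Since descriptors are 8 bytes, mapping that offset to the descriptor slot
 * it touches is a shift by X86_SEL_SHIFT (3):
 *
 * @code
 *     static uint32_t selmSketchGdtOffsetToIndex(uint32_t offGuestGdt)
 *     {
 *         return offGuestGdt >> X86_SEL_SHIFT; // index of the 8-byte descriptor slot
 *     }
 * @endcode
 */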


# ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
 * @callback_method_impl{FNPGMVIRTHANDLER}
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Log(("selmGuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser); RT_NOREF_PV(pVM);

    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
# ifdef IN_RING3
    return VINF_PGM_HANDLER_DO_DEFAULT;
# else
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
# endif
}
# endif


# ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
 * @callback_method_impl{FNPGMVIRTHANDLER}
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Log(("selmGuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
    NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);

# ifdef IN_RING3
    RT_NOREF_PV(pVM);

    /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
     *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
     *        should probably also deregister the virtual handler if TR.base/size
     *        changes while we're in REM. May also share
     *        selmRCGuestTssPostWriteCheck code. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    return VINF_PGM_HANDLER_DO_DEFAULT;

# else /* IN_RC */
    /*
     * Do the write and check if anything relevant changed.
     */
    Assert(pVM->selm.s.GCPtrGuestTss != (uintptr_t)RTRCPTR_MAX);
    memcpy(pvPtr, pvBuf, cbBuf);
    return selmRCGuestTssPostWriteCheck(pVM, pVCpu, GCPtr - pVM->selm.s.GCPtrGuestTss, cbBuf);
# endif
}
# endif

#endif /* !IN_RING0 */


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM     The cross context VM structure.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));

    /** @todo check the limit. */
    X86DESC Desc;
    if (!(Sel & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT pages not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
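
/* For reference, an illustrative expansion of what X86DESC_BASE() gathers
 * from a generic descriptor: the 32-bit base is scattered across three
 * fields. The helper name below is hypothetical; only the field names come
 * from the X86DESC layout used above.
 *
 * @code
 *     static uint32_t selmSketchDescBase(PCX86DESC pDesc)
 *     {
 *         return pDesc->Gen.u16BaseLow
 *              | ((uint32_t)pDesc->Gen.u8BaseHigh1 << 16)
 *              | ((uint32_t)pDesc->Gen.u8BaseHigh2 << 24);
 *     }
 * @endcode
 */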


/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM         The cross context VM structure.
 * @param   SelReg      Selector register.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG pSReg;
    PVMCPU      pVCpu = VMMGetCpu(pVM);

    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first.
     */
    if (   pCtxCore->eflags.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
    {
        uint32_t uFlat = (uint32_t)Addr & 0xffff;
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
            uFlat += (uint32_t)pSReg->u64Base;
        else
            uFlat += (uint32_t)pSReg->Sel << 4;
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    if (   pCtxCore->cs.Attr.n.u1Long
        && CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr; /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return (uint32_t)pSReg->u64Base + (uint32_t)Addr;
}
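
/* The real/v86-mode branch above is the classic seg:off translation with a
 * 16-bit offset wrap. A minimal standalone sketch (hypothetical helper):
 *
 * @code
 *     static uint32_t selmSketchRealModeFlat(uint16_t uSel, uint32_t uOff)
 *     {
 *         return ((uint32_t)uSel << 4) + (uOff & 0xffff);
 *     }
 * @endcode
 *
 * For example, 0xf000:0xfff0 yields flat address 0xffff0 (the reset vector).
 */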


/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   SelReg      Selector register.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 * @param   fFlags      SELMTOFLAT_FLAGS_*
 *                      GDT entries are valid.
 * @param   ppvGC       Where to store the GC flat address.
 */
VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
{
    /*
     * Fetch the selector first.
     */
    PCPUMSELREG pSReg;
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
    AssertRCReturn(rc, rc); AssertPtr(pSReg);

    /*
     * Deal with real & v86 mode first.
     */
    if (   pCtxCore->eflags.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
    {
        if (ppvGC)
        {
            uint32_t uFlat = (uint32_t)Addr & 0xffff;
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
                *ppvGC = (uint32_t)pSReg->u64Base + uFlat;
            else
                *ppvGC = ((uint32_t)pSReg->Sel << 4) + uFlat;
        }
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    RTGCPTR pvFlat;
    bool    fCheckLimit = true;
    if (   pCtxCore->cs.Attr.n.u1Long
        && CPUMIsGuestInLongMode(pVCpu))
    {
        fCheckLimit = false;
        switch (SelReg)
        {
            case DISSELREG_FS:
            case DISSELREG_GS:
                pvFlat = pSReg->u64Base + Addr;
                break;

            default:
                pvFlat = Addr;
                break;
        }
    }
    else
    {
        /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
        Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
        pvFlat = (uint32_t)pSReg->u64Base + (uint32_t)Addr;
        Assert(pvFlat <= UINT32_MAX);
    }

    /*
     * Check type if present.
     */
    if (pSReg->Attr.n.u1Present)
    {
        switch (pSReg->Attr.n.u4Type)
        {
            /* Read only selector type. */
            case X86_SEL_TYPE_RO:
            case X86_SEL_TYPE_RO_ACC:
            case X86_SEL_TYPE_RW:
            case X86_SEL_TYPE_RW_ACC:
            case X86_SEL_TYPE_EO:
            case X86_SEL_TYPE_EO_ACC:
            case X86_SEL_TYPE_ER:
            case X86_SEL_TYPE_ER_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            case X86_SEL_TYPE_EO_CONF:
            case X86_SEL_TYPE_EO_CONF_ACC:
            case X86_SEL_TYPE_ER_CONF:
            case X86_SEL_TYPE_ER_CONF_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            case X86_SEL_TYPE_RO_DOWN:
            case X86_SEL_TYPE_RO_DOWN_ACC:
            case X86_SEL_TYPE_RW_DOWN:
            case X86_SEL_TYPE_RW_DOWN_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit)
                {
                    if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                    if (Addr <= pSReg->u32Limit)
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                }
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;
        }
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
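
/* The expand-down cases above invert the usual limit test: offsets at or
 * below the limit are out of bounds, while offsets above it up to 0xffff
 * (or 0xffffffff with the granularity bit set) are valid. A sketch that
 * mirrors the checks above (hypothetical helper):
 *
 * @code
 *     static bool selmSketchExpandDownOk(uint32_t uOff, uint32_t u32Limit, bool fGranularity)
 *     {
 *         uint32_t const uTop = fGranularity ? UINT32_C(0xffffffff) : UINT32_C(0xffff);
 *         return uOff > u32Limit && uOff <= uTop;
 *     }
 * @endcode
 */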


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   eflags  Current eflags.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @param   fFlags  SELMTOFLAT_FLAGS_*
 *                  GDT entries are valid.
 * @param   ppvGC   Where to store the GC flat address.
 * @param   pcb     Where to store the bytes from *ppvGC which can be accessed according to
 *                  the selector. NULL is allowed.
 * @remarks Don't use when in long mode.
 */
VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
                               uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
{
    Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */
    Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));

    /*
     * Deal with real & v86 mode first.
     */
    if (   eflags.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
    {
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
            *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
        if (pcb)
            *pcb = 0x10000 - uFlat;
        return VINF_SUCCESS;
    }

    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    X86DESC Desc;
    PVM     pVM = pVCpu->CTX_SUFF(pVM);
    if (!(Sel & X86_SEL_LDT))
    {
        if (   !(fFlags & SELMTOFLAT_FLAGS_HYPER)
            && (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
            return VERR_INVALID_SELECTOR;
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    }
    else
    {
        if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
            return VERR_INVALID_SELECTOR;

        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* calc limit. */
    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);

    /* calc address assuming straight stuff. */
    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);

    /* Cut the address to 32 bits. */
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    pvFlat &= 0xffffffff;

    uint8_t u1Present     = Desc.Gen.u1Present;
    uint8_t u1Granularity = Desc.Gen.u1Granularity;
    uint8_t u1DescType    = Desc.Gen.u1DescType;
    uint8_t u4Type        = Desc.Gen.u4Type;

    /*
     * Check if present.
     */
    if (u1Present)
    {
        /*
         * Type check.
         */
#define BOTH(a, b) ((a << 16) | b)
        switch (BOTH(u1DescType, u4Type))
        {
            /** Read only selector type. */
            case BOTH(1,X86_SEL_TYPE_RO):
            case BOTH(1,X86_SEL_TYPE_RO_ACC):
            case BOTH(1,X86_SEL_TYPE_RW):
            case BOTH(1,X86_SEL_TYPE_RW_ACC):
            case BOTH(1,X86_SEL_TYPE_EO):
            case BOTH(1,X86_SEL_TYPE_EO_ACC):
            case BOTH(1,X86_SEL_TYPE_ER):
            case BOTH(1,X86_SEL_TYPE_ER_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            case BOTH(1,X86_SEL_TYPE_EO_CONF):
            case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
            case BOTH(1,X86_SEL_TYPE_ER_CONF):
            case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            case BOTH(1,X86_SEL_TYPE_RO_DOWN):
            case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;

                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
                return VINF_SUCCESS;

            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_LDT):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;
        }
#undef BOTH
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
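
/* The BOTH() macro above packs the S bit (u1DescType) and the 4-bit type
 * field into one value so a single switch can tell code/data descriptors
 * from system descriptors that share the same low type bits. Worked example
 * of the packing:
 *
 * @code
 *     // BOTH(1, X86_SEL_TYPE_RW)      == (1 << 16) | 2 == 0x10002 (read/write data)
 *     // BOTH(0, X86_SEL_TYPE_SYS_LDT) == (0 << 16) | 2 == 0x00002 (LDT descriptor)
 * @endcode
 */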


#ifdef VBOX_WITH_RAW_MODE_NOT_R0

static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
{
    Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
    RT_NOREF_PV(pCtx); RT_NOREF_PV(Sel);

    /*
     * Try to read the entry.
     */
    X86DESC GstDesc;
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc), PGMACCESSORIGIN_SELM);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * Validate it and load it.
         */
        if (selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
            Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
                 g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
            STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
        }
        else
        {
            Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
            STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
        }
    }
    else
    {
        AssertMsg(RT_FAILURE_NP(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n",
             g_aszSRegNms[iSReg], Sel, VBOXSTRICTRC_VAL(rcStrict) ));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
    }
}


/**
 * CPUM helper that loads the hidden selector register from the descriptor table
 * when executing with raw-mode.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}

#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
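
/* Both table lookups above find the descriptor by masking the RPL and TI
 * bits out of the selector (X86_SEL_MASK == 0xfff8) and adding the result,
 * which is already a byte offset of an 8-byte slot, to the table base.
 * Hypothetical sketch for the GDT case:
 *
 * @code
 *     static RTGCPTR selmSketchGdtDescAddr(RTGCPTR GCPtrGdtBase, RTSEL Sel)
 *     {
 *         return GCPtrGdtBase + (Sel & X86_SEL_MASK);
 *     }
 * @endcode
 */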

/**
 * Validates and converts a GC selector based code address to a flat
 * address when in real or v8086 mode.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   SelCS   Selector part.
 * @param   pSReg   The hidden CS register part. Optional.
 * @param   Addr    Address part.
 * @param   ppvFlat Where to store the flat address.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
                                                     PRTGCPTR ppvFlat)
{
    NOREF(pVCpu);
    uint32_t uFlat = Addr & 0xffff;
    if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        uFlat += (uint32_t)SelCS << 4;
    else
        uFlat += (uint32_t)pSReg->u64Base;
    *ppvFlat = uFlat;
    return VINF_SUCCESS;
}
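
/* The helper above prefers a valid hidden base over Sel << 4, which matters
 * for "unreal/big real mode" guests whose cached CS base no longer equals
 * the selector shifted by four. Sketch of the selection (hypothetical helper):
 *
 * @code
 *     static uint32_t selmSketchRealModeBase(bool fHiddenValid, uint64_t u64Base, uint16_t uSel)
 *     {
 *         return fHiddenValid ? (uint32_t)u64Base : (uint32_t)uSel << 4;
 *     }
 * @endcode
 */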


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   SelCPL  Current privilege level.  Get this from SS - CS might be
 *                  conforming!  A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   Addr    Address part.
 * @param   ppvFlat Where to store the flat address.
 * @param   pcBits  Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));

    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check.
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
               )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
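
/* The privilege test above hinges on the conforming bit: a non-conforming
 * code segment requires max(CPL, RPL) <= DPL, while a conforming one may be
 * executed from any numerically higher or equal level. Sketch mirroring the
 * conditional above (hypothetical helper):
 *
 * @code
 *     static bool selmSketchCsPrivOk(bool fConforming, unsigned uLevel, unsigned uDpl)
 *     {
 *         return fConforming ? uLevel >= uDpl : uLevel <= uDpl;
 *     }
 * @endcode
 */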


/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the standard hidden selector registers.
 *
 * @returns VBox status code.
 * @param   pVCpu    The cross context virtual CPU structure.
 * @param   SelCPL   Current privilege level.  Get this from SS - CS might be
 *                   conforming!  A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   pSRegCS  The full CS selector register.
 * @param   Addr     The address (think IP/EIP/RIP).
 * @param   ppvFlat  Where to store the flat address upon successful return.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
                                                   RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    NOREF(SelCPL); NOREF(SelCS);

    /*
     * Check if present.
     */
    if (pSRegCS->Attr.n.u1Present)
    {
        /*
         * Type check.
         */
        if (    pSRegCS->Attr.n.u1DescType == 1
            &&  (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
        {
            /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
               (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
            if (    pSRegCS->Attr.n.u1Long
                &&  CPUMIsGuestInLongMode(pVCpu))
            {
                *ppvFlat = Addr;
                return VINF_SUCCESS;
            }

            /*
             * Limit check. Note that the limit in the hidden register is the
             * final value. The granularity bit was included in its calculation.
             */
            uint32_t u32Limit = pSRegCS->u32Limit;
            if ((uint32_t)Addr <= u32Limit)
            {
                *ppvFlat = (uint32_t)Addr + (uint32_t)pSRegCS->u64Base;
                return VINF_SUCCESS;
            }

            return VERR_OUT_OF_SELECTOR_BOUNDS;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
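
/* Unlike the raw descriptor paths, no X86DESC_LIMIT_G() call is needed here:
 * the hidden register caches the limit already scaled by the granularity
 * bit. Sketch of that scaling for a raw 20-bit descriptor limit
 * (hypothetical helper):
 *
 * @code
 *     static uint32_t selmSketchScaleLimit(uint32_t u20Limit, bool fGranularity)
 *     {
 *         return fGranularity ? (u20Limit << 12) | UINT32_C(0xfff) : u20Limit;
 *     }
 * @endcode
 */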


/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * @returns VBox status code.
 * @param   pVCpu    The cross context virtual CPU structure.
 * @param   Efl      Current EFLAGS.
 * @param   SelCPL   Current privilege level.  Get this from SS - CS might be
 *                   conforming!  A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   pSRegCS  The full CS selector register.
 * @param   Addr     The address (think IP/EIP/RIP).
 * @param   ppvFlat  Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdated. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression. */
    if ((SelCPL & X86_SEL_RPL) == 1 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
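
/* "Ring compression" background for the two &= ~X86_SEL_RPL statements
 * above: raw-mode runs guest ring-0 code at ring 1, so an RPL of 1 seen
 * here really denotes guest ring 0 and is mapped back before the privilege
 * checks. Sketch of the mapping, X86_SEL_RPL being the low two bits:
 *
 * @code
 *     static RTSEL selmSketchUndoRingCompression(RTSEL Sel)
 *     {
 *         return (Sel & X86_SEL_RPL) == 1 ? Sel & ~X86_SEL_RPL : Sel;
 *     }
 * @endcode
 */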


/**
 * Returns Hypervisor's Trap 08 (\#DF) selector.
 *
 * @returns Hypervisor's Trap 08 (\#DF) selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}


/**
 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
 *
 * @param   pVM     The cross context VM structure.
 * @param   u32EIP  EIP of Trap 08 handler.
 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    pVM->selm.s.TssTrap08.eip = u32EIP;
}


/**
 * Sets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * @param   pVM     The cross context VM structure.
 * @param   ss      Ring1 SS register value.  Pass 0 if invalid.
 * @param   esp     Ring1 ESP register value.
 */
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));
    Assert((ss & 1) || esp == 0);
    pVM->selm.s.Tss.ss1  = ss;
    pVM->selm.s.Tss.esp1 = (uint32_t)esp;
}


#ifdef VBOX_WITH_RAW_RING1
/**
 * Sets ss:esp for ring2 in main Hypervisor's TSS.
 *
 * @param   pVM     The cross context VM structure.
 * @param   ss      Ring2 SS register value.  Pass 0 if invalid.
 * @param   esp     Ring2 ESP register value.
 */
void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));
    Assert((ss & 3) == 2 || esp == 0);
    pVM->selm.s.Tss.ss2  = ss;
    pVM->selm.s.Tss.esp2 = (uint32_t)esp;
}
#endif
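
/* The ssN/espN pairs set above sit at architecturally fixed offsets in a
 * 32-bit TSS (esp0 at 0x04, ss0 at 0x08, esp1 at 0x0c, ss1 at 0x10, esp2 at
 * 0x14, ss2 at 0x18), which is where the CPU fetches them on inter-ring
 * transfers. Assuming VBOXTSS mirrors that layout one-to-one, the invariant
 * could be spelled out at compile time:
 *
 * @code
 *     AssertCompileMemberOffset(VBOXTSS, esp1, 0x0c);
 *     AssertCompileMemberOffset(VBOXTSS, ss1,  0x10);
 * @endcode
 */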


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSS     Ring1 SS register value.
 * @param   pEsp    Ring1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(VM_IS_RAW_MODE_ENABLED(pVM));
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
#endif
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool    fTriedAlready = false;

l_tryagain:
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
#  ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
#  endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    }
#endif

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */


#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS))

/**
 * Gets the hypervisor code selector (CS).
 * @returns CS selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
}


/**
 * Gets the 64-mode hypervisor code selector (CS64).
 * @returns CS selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
}


/**
 * Gets the hypervisor data selector (DS).
 * @returns DS selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
}


/**
 * Gets the hypervisor TSS selector.
 * @returns TSS selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
}


/**
 * Gets the hypervisor TSS Trap 8 selector.
 * @returns TSS Trap 8 selector.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}

/**
 * Gets the address for the hypervisor GDT.
 *
 * @returns The GDT address.
 * @param   pVM     The cross context VM structure.
 * @remark  This is intended only for very special use, like in the world
 *          switchers.  Don't exploit this API!
 */
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
    /*
     * Always convert this from the HC pointer since we can be
     * called before the first relocation and have to work correctly
     * without having dependencies on the relocation order.
     */
    return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}

#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS)) */

/**
 * Gets info about the current TSS.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if we've got a TSS loaded.
 * @retval  VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pGCPtrTss           Where to store the TSS address.
 * @param   pcbTss              Where to store the TSS size limit.
 * @param   pfCanHaveIOBitmap   Where to store the can-have-I/O-bitmap indicator. (optional)
 */
VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
{
    NOREF(pVM);

    /*
     * The TR hidden register is always valid.
     */
    CPUMSELREGHID trHid;
    RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
    if (!(tr & X86_SEL_MASK_OFF_RPL))
        return VERR_SELM_NO_TSS;

    *pGCPtrTss = trHid.u64Base;
    *pcbTss    = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
    if (pfCanHaveIOBitmap)
        *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
                          || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
    return VINF_SUCCESS;
}
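
/* The *pcbTss computation above turns an inclusive limit into a byte count
 * without overflowing: limit + 1 would wrap for UINT32_MAX, so the +1 is
 * only added when the limit is below that. Equivalent sketch (hypothetical
 * helper):
 *
 * @code
 *     static uint32_t selmSketchLimitToSize(uint32_t u32Limit)
 *     {
 *         return u32Limit == UINT32_MAX ? UINT32_MAX : u32Limit + 1;
 *     }
 * @endcode
 */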


/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /** @todo SMP support!! (64-bit guest scenario, primarily) */
    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}