VirtualBox

source: vbox/trunk/src/VBox/VMM/SELM.cpp@ 1335

Last change on this file since 1335 was 1335, checked in by vboxsync, 18 years ago

Sync the null LDT selector as it's perfectly valid. (unlike the null GDT selector)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.0 KB
1/* $Id: SELM.cpp 1335 2007-03-08 15:59:39Z vboxsync $ */
2/** @file
3 * SELM - The Selector manager.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_SELM
26#include <VBox/selm.h>
27#include <VBox/cpum.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pdm.h>
31#include <VBox/pgm.h>
32#include <VBox/trpm.h>
33#include <VBox/dbgf.h>
34#include "SELMInternal.h"
35#include <VBox/vm.h>
36#include <VBox/err.h>
37#include <VBox/param.h>
38
39#include <iprt/assert.h>
40#include <VBox/log.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/string.h>
45#include "x86context.h"
46
47
48/**
49 * Enable or disable tracking of Guest's GDT/LDT/TSS.
50 * @{
51 */
52#define SELM_TRACK_GUEST_GDT_CHANGES
53#define SELM_TRACK_GUEST_LDT_CHANGES
54#define SELM_TRACK_GUEST_TSS_CHANGES
55/** @} */
56
57/**
58 * Enable or disable tracking of Shadow GDT/LDT/TSS.
59 * @{
60 */
61#define SELM_TRACK_SHADOW_GDT_CHANGES
62#define SELM_TRACK_SHADOW_LDT_CHANGES
63#define SELM_TRACK_SHADOW_TSS_CHANGES
64/** @} */
65
66
67/** SELM saved state version. */
68#define SELM_SAVED_STATE_VERSION 5
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
74static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
75static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
77static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
78static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
79static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
80//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
81//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
82static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
83static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
84static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
85
86
87
88/**
89 * Initializes the SELM.
90 *
91 * @returns VBox status code.
92 * @param pVM The VM to operate on.
93 */
94SELMR3DECL(int) SELMR3Init(PVM pVM)
95{
96 LogFlow(("SELMR3Init\n"));
97
98 /*
99 * Assert alignment and sizes.
100 */
101 AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
102 AssertRelease(!(RT_OFFSETOF(VM, selm.s.Tss) & 15));
103 AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
104
105 /*
106 * Init the structure.
107 */
108 pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
109 pVM->selm.s.SelCS = (SELM_GDT_ELEMENTS - 0x1) << 3;
110 pVM->selm.s.SelDS = (SELM_GDT_ELEMENTS - 0x2) << 3;
111 pVM->selm.s.SelCS64 = (SELM_GDT_ELEMENTS - 0x3) << 3;
112 pVM->selm.s.SelTSS = (SELM_GDT_ELEMENTS - 0x4) << 3;
113 pVM->selm.s.SelTSSTrap08 = (SELM_GDT_ELEMENTS - 0x5) << 3;
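 /* For reference: an x86 selector value is (descriptor index << 3) | TI | RPL, so the
  * five hypervisor selectors above occupy the topmost five GDT slots with TI=0 (GDT)
  * and RPL=0. For example, if SELM_GDT_ELEMENTS were 8192 (its value is defined
  * elsewhere, not in this file), SelCS would be (8192 - 1) << 3 = 0xFFF8. */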
114
115 /*
116 * Allocate GDT table.
117 */
118 int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtHC[0]) * SELM_GDT_ELEMENTS,
119 PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtHC);
120 AssertRCReturn(rc, rc);
121
122 /*
123 * Allocate LDT area.
124 */
125 rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.HCPtrLdt);
126 AssertRCReturn(rc, rc);
127
128 /*
129 * Init Guest's and Shadow GDT, LDT, TSS changes control variables.
130 */
131 pVM->selm.s.cbEffGuestGdtLimit = 0;
132 pVM->selm.s.GuestGdtr.pGdt = ~0;
133 pVM->selm.s.GCPtrGuestLdt = ~0;
134 pVM->selm.s.GCPtrGuestTss = ~0;
135
136 pVM->selm.s.paGdtGC = 0;
137 pVM->selm.s.GCPtrLdt = ~0;
138 pVM->selm.s.GCPtrTss = ~0;
139 pVM->selm.s.GCSelTss = ~0;
140
141 pVM->selm.s.fDisableMonitoring = false;
142 pVM->selm.s.fSyncTSSRing0Stack = false;
143
144 /*
145 * Register the saved state data unit.
146 */
147 rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
148 NULL, selmR3Save, NULL,
149 NULL, selmR3Load, selmR3LoadDone);
150 if (VBOX_FAILURE(rc))
151 return rc;
152
153 /*
154 * Statistics.
155 */
156 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
157 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
158 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of detected writes to the Guest LDT.");
159 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
160 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
161 STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
162 STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
163 STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
164
165 /*
166 * Default action when entering raw mode for the first time
167 */
168 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
169 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
170 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
171
172 /*
173 * Register info handlers.
174 */
175 DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
176 DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
177 DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
178 DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
179 //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
180 //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);
181
182 return rc;
183}
184
185
186/**
187 * Finalizes HMA page attributes.
188 *
189 * @returns VBox status code.
190 * @param pVM The VM handle.
191 */
192SELMR3DECL(int) SELMR3InitFinalize(PVM pVM)
193{
194 /*
195 * Make Double Fault work with WP enabled?
196 *
197 * A double fault is a task switch and thus requires write access to the GDT entry of the TSS
198 * (to set it busy), to the old TSS (to store state), and to the Trap 8 TSS for the back link.
199 *
200 * Since enabling write access to these pages makes us vulnerable to attacks,
201 * it is not done by default.
202 */
203 bool f;
204 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
205#if !defined(DEBUG_bird) && !defined(__AMD64__) /** @todo Remember to remove __AMD64__ here! */
206 if (VBOX_SUCCESS(rc) && f)
207#endif
208 {
209 PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
210 rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.SelTSSTrap08 >> 3]), sizeof(paGdt[0]),
211 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
212 AssertRC(rc);
213 rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.SelTSS >> 3]), sizeof(paGdt[0]),
214 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
215 AssertRC(rc);
216 rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss), sizeof(pVM->selm.s.Tss),
217 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
218 AssertRC(rc);
219 rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08), sizeof(pVM->selm.s.TssTrap08),
220 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
221 AssertRC(rc);
222 }
223 return VINF_SUCCESS;
224}
225
226
227/**
228 * Applies relocations to data and code managed by this
229 * component. This function will be called at init and
230 * whenever the VMM needs to relocate itself inside the GC.
231 *
232 * @param pVM The VM.
233 */
234SELMR3DECL(void) SELMR3Relocate(PVM pVM)
235{
236 LogFlow(("SELMR3Relocate\n"));
237 PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
238
239 /*
240 * Update GDTR and selector.
241 */
242 CPUMSetHyperGDTR(pVM, MMHyperHC2GC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
243
244 /** @todo selector relocations should be a separate operation? */
245 CPUMSetHyperCS(pVM, pVM->selm.s.SelCS);
246 CPUMSetHyperDS(pVM, pVM->selm.s.SelDS);
247 CPUMSetHyperES(pVM, pVM->selm.s.SelDS);
248 CPUMSetHyperSS(pVM, pVM->selm.s.SelDS);
249 CPUMSetHyperTR(pVM, pVM->selm.s.SelTSS);
250
251 /*
252 * Set up global code and data descriptors for use in the guest context.
253 * Both are wide open (base 0, limit 4GB)
254 */
255 PVBOXDESC pDesc = &paGdt[pVM->selm.s.SelCS >> 3];
256 pDesc->Gen.u16LimitLow = 0xffff;
257 pDesc->Gen.u4LimitHigh = 0xf;
258 pDesc->Gen.u16BaseLow = 0;
259 pDesc->Gen.u8BaseHigh1 = 0;
260 pDesc->Gen.u8BaseHigh2 = 0;
261 pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
262 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
263 pDesc->Gen.u2Dpl = 0; /* supervisor */
264 pDesc->Gen.u1Present = 1;
265 pDesc->Gen.u1Available = 0;
266 pDesc->Gen.u1Reserved = 0;
267 pDesc->Gen.u1DefBig = 1; /* def 32 bit */
268 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
269
270 /* data */
271 pDesc = &paGdt[pVM->selm.s.SelDS >> 3];
272 pDesc->Gen.u16LimitLow = 0xffff;
273 pDesc->Gen.u4LimitHigh = 0xf;
274 pDesc->Gen.u16BaseLow = 0;
275 pDesc->Gen.u8BaseHigh1 = 0;
276 pDesc->Gen.u8BaseHigh2 = 0;
277 pDesc->Gen.u4Type = X86_SELTYPE_MEM_READWRITE_ACC;
278 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
279 pDesc->Gen.u2Dpl = 0; /* supervisor */
280 pDesc->Gen.u1Present = 1;
281 pDesc->Gen.u1Available = 0;
282 pDesc->Gen.u1Reserved = 0;
283 pDesc->Gen.u1DefBig = 1; /* big */
284 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
285
286 /* 64-bit mode code (& data?) */
287 pDesc = &paGdt[pVM->selm.s.SelCS64 >> 3];
288 pDesc->Gen.u16LimitLow = 0xffff;
289 pDesc->Gen.u4LimitHigh = 0xf;
290 pDesc->Gen.u16BaseLow = 0;
291 pDesc->Gen.u8BaseHigh1 = 0;
292 pDesc->Gen.u8BaseHigh2 = 0;
293 pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
294 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
295 pDesc->Gen.u2Dpl = 0; /* supervisor */
296 pDesc->Gen.u1Present = 1;
297 pDesc->Gen.u1Available = 0;
298 pDesc->Gen.u1Reserved = 1; /* The Long (L) attribute bit. */
299 pDesc->Gen.u1DefBig = 0; /* With L=1 this must be 0. */
300 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
301
302 /*
303 * TSS descriptor
304 */
305 pDesc = &paGdt[pVM->selm.s.SelTSS >> 3];
306 RTGCPTR pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
307 pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
308 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
309 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
310 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
311 pDesc->Gen.u4LimitHigh = 0;
312 pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
313 pDesc->Gen.u1DescType = 0; /* system */
314 pDesc->Gen.u2Dpl = 0; /* supervisor */
315 pDesc->Gen.u1Present = 1;
316 pDesc->Gen.u1Available = 0;
317 pDesc->Gen.u1Reserved = 0;
318 pDesc->Gen.u1DefBig = 0;
319 pDesc->Gen.u1Granularity = 0; /* byte limit */
320
321 /*
322 * TSS descriptor for trap 08
323 */
324 pDesc = &paGdt[pVM->selm.s.SelTSSTrap08 >> 3];
325 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
326 pDesc->Gen.u4LimitHigh = 0;
327 pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08);
328 pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
329 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
330 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
331 pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
332 pDesc->Gen.u1DescType = 0; /* system */
333 pDesc->Gen.u2Dpl = 0; /* supervisor */
334 pDesc->Gen.u1Present = 1;
335 pDesc->Gen.u1Available = 0;
336 pDesc->Gen.u1Reserved = 0;
337 pDesc->Gen.u1DefBig = 0;
338 pDesc->Gen.u1Granularity = 0; /* byte limit */
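 /* Note on the split fields used above: a descriptor's base is spread over three fields
  * and is reassembled elsewhere in this file as
  *     base  = u16BaseLow | (u8BaseHigh1 << 16) | (u8BaseHigh2 << 24)
  * while the 20-bit limit is u16LimitLow | (u4LimitHigh << 16), scaled to 4KB pages when
  * the granularity bit is set. Both TSS descriptors use byte granularity, so their limit
  * is simply sizeof(VBOXTSS) - 1. */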
339
340/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
341/** @todo PGM knows the proper CR3 values these days, not CPUM. */
342 /*
343 * Update the TSSes.
344 */
345 /* Current TSS */
346 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
347 pVM->selm.s.Tss.ss0 = pVM->selm.s.SelDS;
348 pVM->selm.s.Tss.esp0 = VMMGetStackGC(pVM);
349 pVM->selm.s.Tss.cs = pVM->selm.s.SelCS;
350 pVM->selm.s.Tss.ds = pVM->selm.s.SelDS;
351 pVM->selm.s.Tss.es = pVM->selm.s.SelDS;
352 pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
353
354 /* trap 08 */
355 pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM); /* this should give us better survival chances. */
356 pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.SelDS;
357 pVM->selm.s.TssTrap08.ss = pVM->selm.s.SelDS;
358 pVM->selm.s.TssTrap08.esp0 = VMMGetStackGC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
359 pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
360 pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
361 pVM->selm.s.TssTrap08.cs = pVM->selm.s.SelCS;
362 pVM->selm.s.TssTrap08.ds = pVM->selm.s.SelDS;
363 pVM->selm.s.TssTrap08.es = pVM->selm.s.SelDS;
364 pVM->selm.s.TssTrap08.fs = 0;
365 pVM->selm.s.TssTrap08.gs = 0;
366 pVM->selm.s.TssTrap08.selLdt = 0;
367 pVM->selm.s.TssTrap08.eflags = 0x2; /* all flags cleared; bit 1 of EFLAGS is reserved and always set. */
368 pVM->selm.s.TssTrap08.ecx = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
369 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
370 pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
371 pVM->selm.s.TssTrap08.edx = VM_GUEST_ADDR(pVM, pVM); /* setup edx VM address. */
372 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
373 pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
374 pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
375 /* TRPM will be updating the eip */
376
377 if (!pVM->selm.s.fDisableMonitoring)
378 {
379 /*
380 * Update shadow GDT/LDT/TSS write access handlers.
381 */
382 int rc;
383#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
384 if (pVM->selm.s.paGdtGC != 0)
385 {
386 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
387 AssertRC(rc);
388 }
389 pVM->selm.s.paGdtGC = MMHyperHC2GC(pVM, paGdt);
390 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtGC,
391 pVM->selm.s.paGdtGC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
392 0, 0, "selmgcShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
393 AssertRC(rc);
394#endif
395#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
396 if (pVM->selm.s.GCPtrTss != ~0U)
397 {
398 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
399 AssertRC(rc);
400 }
401 pVM->selm.s.GCPtrTss = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
402 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrTss,
403 pVM->selm.s.GCPtrTss + sizeof(pVM->selm.s.Tss) - 1,
404 0, 0, "selmgcShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
405 AssertRC(rc);
406#endif
407
408 /*
409 * Update the GC LDT region handler and address.
410 */
411#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
412 if (pVM->selm.s.GCPtrLdt != ~0U)
413 {
414 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
415 AssertRC(rc);
416 }
417#endif
418 pVM->selm.s.GCPtrLdt = MMHyperHC2GC(pVM, pVM->selm.s.HCPtrLdt);
419#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
420 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrLdt,
421 pVM->selm.s.GCPtrLdt + _64K + PAGE_SIZE - 1,
422 0, 0, "selmgcShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
423 AssertRC(rc);
424#endif
425 }
426}
427
428
429/**
430 * Notification callback which is called whenever there is a chance that a CR3
431 * value might have changed.
432 * This is called by PGM.
433 *
434 * @param pVM The VM handle
435 */
436SELMR3DECL(void) SELMR3PagingModeChanged(PVM pVM)
437{
438 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
439 pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM);
440}
441
442
443/**
444 * Terminates the SELM.
445 *
446 * Termination means cleaning up and freeing all resources;
447 * the VM itself is at this point powered off or suspended.
448 *
449 * @returns VBox status code.
450 * @param pVM The VM to operate on.
451 */
452SELMR3DECL(int) SELMR3Term(PVM pVM)
453{
454 return 0;
455}
456
457
458/**
459 * The VM is being reset.
460 *
461 * For the SELM component this means that any GDT/LDT/TSS monitors
462 * need to be removed.
463 *
464 * @param pVM VM handle.
465 */
466SELMR3DECL(void) SELMR3Reset(PVM pVM)
467{
468 LogFlow(("SELMR3Reset:\n"));
469 VM_ASSERT_EMT(pVM);
470
471 /*
472 * Uninstall guest GDT/LDT/TSS write access handlers.
473 */
474 int rc;
475#ifdef SELM_TRACK_GUEST_GDT_CHANGES
476 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
477 {
478 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
479 AssertRC(rc);
480 pVM->selm.s.GuestGdtr.pGdt = ~0U;
481 pVM->selm.s.GuestGdtr.cbGdt = 0;
482 }
483 pVM->selm.s.fGDTRangeRegistered = false;
484#endif
485#ifdef SELM_TRACK_GUEST_LDT_CHANGES
486 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
487 {
488 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
489 AssertRC(rc);
490 pVM->selm.s.GCPtrGuestLdt = ~0U;
491 }
492#endif
493#ifdef SELM_TRACK_GUEST_TSS_CHANGES
494 if (pVM->selm.s.GCPtrGuestTss != ~0U)
495 {
496 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
497 AssertRC(rc);
498 pVM->selm.s.GCPtrGuestTss = ~0U;
499 pVM->selm.s.GCSelTss = ~0;
500 }
501#endif
502
503 /*
504 * Re-initialize other members.
505 */
506 pVM->selm.s.cbLdtLimit = 0;
507 pVM->selm.s.offLdtHyper = 0;
508 pVM->selm.s.cbMonitoredGuestTss = 0;
509
510 pVM->selm.s.fSyncTSSRing0Stack = false;
511
512 /*
513 * Default action when entering raw mode for the first time
514 */
515 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
516 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
517 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
518}
519
520/**
521 * Disable GDT/LDT/TSS monitoring and syncing
522 *
523 * @param pVM The VM to operate on.
524 */
525SELMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
526{
527 /*
528 * Uninstall guest GDT/LDT/TSS write access handlers.
529 */
530 int rc;
531#ifdef SELM_TRACK_GUEST_GDT_CHANGES
532 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
533 {
534 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
535 AssertRC(rc);
536 pVM->selm.s.GuestGdtr.pGdt = ~0U;
537 pVM->selm.s.GuestGdtr.cbGdt = 0;
538 }
539 pVM->selm.s.fGDTRangeRegistered = false;
540#endif
541#ifdef SELM_TRACK_GUEST_LDT_CHANGES
542 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
543 {
544 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
545 AssertRC(rc);
546 pVM->selm.s.GCPtrGuestLdt = ~0U;
547 }
548#endif
549#ifdef SELM_TRACK_GUEST_TSS_CHANGES
550 if (pVM->selm.s.GCPtrGuestTss != ~0U)
551 {
552 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
553 AssertRC(rc);
554 pVM->selm.s.GCPtrGuestTss = ~0U;
555 pVM->selm.s.GCSelTss = ~0;
556 }
557#endif
558
559 /*
560 * Unregister shadow GDT/LDT/TSS write access handlers.
561 */
562#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
563 if (pVM->selm.s.paGdtGC != 0)
564 {
565 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
566 AssertRC(rc);
567 pVM->selm.s.paGdtGC = 0;
568 }
569#endif
570#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
571 if (pVM->selm.s.GCPtrTss != ~0U)
572 {
573 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
574 AssertRC(rc);
575 pVM->selm.s.GCPtrTss = ~0U;
576 }
577#endif
578#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
579 if (pVM->selm.s.GCPtrLdt != ~0U)
580 {
581 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
582 AssertRC(rc);
583 pVM->selm.s.GCPtrLdt = ~0U;
584 }
585#endif
586
587 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
588 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
589 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
590
591 pVM->selm.s.fDisableMonitoring = true;
592}
593
594/**
595 * Execute state save operation.
596 *
597 * @returns VBox status code.
598 * @param pVM VM Handle.
599 * @param pSSM SSM operation handle.
600 */
601static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
602{
603 LogFlow(("selmR3Save:\n"));
604
605 /*
606 * Save the basic bits - fortunately all the other things can be resynced on load.
607 */
608 PSELM pSelm = &pVM->selm.s;
609
610 SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
611 SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
612 SSMR3PutSel(pSSM, pSelm->SelCS);
613 SSMR3PutSel(pSSM, pSelm->SelDS);
614 SSMR3PutSel(pSSM, pSelm->SelCS64);
615 SSMR3PutSel(pSSM, pSelm->SelCS64); //reserved for DS64.
616 SSMR3PutSel(pSSM, pSelm->SelTSS);
617 return SSMR3PutSel(pSSM, pSelm->SelTSSTrap08);
618}
619
620
621/**
622 * Execute state load operation.
623 *
624 * @returns VBox status code.
625 * @param pVM VM Handle.
626 * @param pSSM SSM operation handle.
627 * @param u32Version Data layout version.
628 */
629static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
630{
631 LogFlow(("selmR3Load:\n"));
632
633 /*
634 * Validate version.
635 */
636 if (u32Version != SELM_SAVED_STATE_VERSION)
637 {
638 Log(("selmR3Load: Invalid version u32Version=%d!\n", u32Version));
639 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
640 }
641
642 /*
643 * Do a reset.
644 */
645 SELMR3Reset(pVM);
646
647 /* Get the monitoring flag. */
648 SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);
649
650 /* Get the TSS state flag. */
651 SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);
652
653 /*
654 * Get the selectors.
655 */
656 RTSEL SelCS;
657 SSMR3GetSel(pSSM, &SelCS);
658 RTSEL SelDS;
659 SSMR3GetSel(pSSM, &SelDS);
660 RTSEL SelCS64;
661 SSMR3GetSel(pSSM, &SelCS64);
662 RTSEL SelDS64;
663 SSMR3GetSel(pSSM, &SelDS64);
664 RTSEL SelTSS;
665 SSMR3GetSel(pSSM, &SelTSS);
666 RTSEL SelTSSTrap08;
667 SSMR3GetSel(pSSM, &SelTSSTrap08);
668 if (u32Version == 1)
669 {
670 RTSEL SelTSSTrap0a;
671 int rc = SSMR3GetSel(pSSM, &SelTSSTrap0a);
672 if (VBOX_FAILURE(rc))
673 return rc;
674 }
675
676 /* Check that no selectors have been relocated. */
677 PSELM pSelm = &pVM->selm.s;
678 if ( SelCS != pSelm->SelCS
679 || SelDS != pSelm->SelDS
680 || SelCS64 != pSelm->SelCS64
681 || SelDS64 != pSelm->SelCS64
682 || SelTSS != pSelm->SelTSS
683 || SelTSSTrap08 != pSelm->SelTSSTrap08)
684 {
685 AssertMsgFailed(("Some selectors have been relocated - this cannot happen!\n"));
686 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
687 }
688
689 return VINF_SUCCESS;
690}
691
692
693/**
694 * Sync the GDT, LDT and TSS after loading the state.
695 *
696 * Just to play safe, we set the FFs to force syncing before
697 * executing GC code.
698 *
699 * @returns VBox status code.
700 * @param pVM VM Handle.
701 * @param pSSM SSM operation handle.
702 */
703static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
704{
705 LogFlow(("selmR3LoadDone:\n"));
706
707 /*
708 * Don't do anything if it's a load failure.
709 */
710 int rc = SSMR3HandleGetStatus(pSSM);
711 if (VBOX_FAILURE(rc))
712 return VINF_SUCCESS;
713
714 /*
715 * Do the syncing if we're in protected mode.
716 */
717 if (PGMGetGuestMode(pVM) != PGMMODE_REAL)
718 {
719 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
720 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
721 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
722 SELMR3UpdateFromCPUM(pVM);
723 }
724
725 /*
726 * Flag everything for resync on next raw mode entry.
727 */
728 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
729 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
730 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
731
732 return VINF_SUCCESS;
733}
734
735
736/**
737 * Sets up the virtualization of a guest GDT.
738 *
739 * @returns VBox status code.
740 * @param pVM The VM to operate on.
741 * @param paGDTEs Pointer to GDT array.
742 * @param cGDTEs Number of entries in the GDT array.
743 */
744SELMR3DECL(int) SELMR3GdtSetup(PVM pVM, PCVBOXDESC paGDTEs, unsigned cGDTEs)
745{
746 AssertMsg(cGDTEs <= (unsigned)(pVM->selm.s.SelTSSTrap08 >> 3), ("Oops! The loaded GDT is as large as ours - we assume no clashes!\n"));
747
748 /*
749 * Enumerate the array.
750 */
751 PCVBOXDESC pGDTESrc = paGDTEs;
752 PVBOXDESC pGDTEDst = pVM->selm.s.paGdtHC;
753 for (unsigned iGDT = 0; iGDT < cGDTEs; iGDT++, pGDTEDst++, pGDTESrc++)
754 {
755 /* ASSUME no clashes for now - lazy bird!!! */
756 if (pGDTESrc->Gen.u1Present)
757 {
758 pGDTEDst->Gen = pGDTESrc->Gen;
759 /* mark non ring-3 selectors as not present. */
760 if (pGDTEDst->Gen.u2Dpl != 3)
761 pGDTEDst->Gen.u1Present = 0;
762 }
763 else
764 {
765 /* zero it. */
766 pGDTEDst->au32[0] = 0;
767 pGDTEDst->au32[1] = 0;
768 }
769 }
770
771 return VINF_SUCCESS;
772}
773
774
775/**
776 * Updates the Guest GDT & LDT virtualization based on current CPU state.
777 *
778 * @returns VBox status code.
779 * @param pVM The VM to operate on.
780 */
781SELMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM)
782{
783 int rc = VINF_SUCCESS;
784
785 if (pVM->selm.s.fDisableMonitoring)
786 {
787 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
788 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
789 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
790
791 return VINF_SUCCESS;
792 }
793
794 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
795
796 /*
797 * GDT sync
798 */
799 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT))
800 {
801 /*
802 * Always assume the best
803 */
804 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
805
806 /* If the GDT was changed, then make sure the LDT is checked too */
807 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
808 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
809 /* Same goes for the TSS selector */
810 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
811
812 /*
813 * Get the GDTR and check if there is anything to do (there usually is).
814 */
815 VBOXGDTR GDTR;
816 CPUMGetGuestGDTR(pVM, &GDTR);
817 if (GDTR.cbGdt < sizeof(VBOXDESC))
818 {
819 Log(("No GDT entries...\n"));
820 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
821 return VINF_SUCCESS;
822 }
823
824 /*
825 * Read the Guest GDT.
826 * ASSUMES that the entire GDT is in memory.
827 */
828 RTUINT cbEffLimit = GDTR.cbGdt;
829 PVBOXDESC pGDTE = &pVM->selm.s.paGdtHC[1];
830 rc = PGMPhysReadGCPtr(pVM, pGDTE, GDTR.pGdt + sizeof(VBOXDESC), cbEffLimit + 1 - sizeof(VBOXDESC));
831 if (VBOX_FAILURE(rc))
832 {
833 /*
834 * Read it page by page.
835 *
836 * Keep track of the last valid page and delay memsets and
837 * adjust cbEffLimit to reflect the effective size. The latter
838 * is something we do in the belief that the guest will probably
839 * never actually commit the last page, thus allowing us to keep
840 * our selectors in the high end of the GDT.
841 */
842 RTUINT cbLeft = cbEffLimit + 1 - sizeof(VBOXDESC);
843 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(VBOXDESC);
844 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtHC[1];
845 uint8_t *pu8DstInvalid = pu8Dst;
846
847 while (cbLeft)
848 {
849 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
850 cb = RT_MIN(cb, cbLeft);
851 rc = PGMPhysReadGCPtr(pVM, pu8Dst, GCPtrSrc, cb);
852 if (VBOX_SUCCESS(rc))
853 {
854 if (pu8DstInvalid != pu8Dst)
855 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
856 GCPtrSrc += cb;
857 pu8Dst += cb;
858 pu8DstInvalid = pu8Dst;
859 }
860 else if ( rc == VERR_PAGE_NOT_PRESENT
861 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
862 {
863 GCPtrSrc += cb;
864 pu8Dst += cb;
865 }
866 else
867 {
868 AssertReleaseMsgFailed(("Couldn't read GDT at %RX32, rc=%Vrc!\n", GDTR.pGdt, rc));
869 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
870 return VERR_NOT_IMPLEMENTED;
871 }
872 cbLeft -= cb;
873 }
874
875 /* any invalid pages at the end? */
876 if (pu8DstInvalid != pu8Dst)
877 {
878 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtHC - 1;
879 /* If any GDTEs were invalidated, zero them. */
880 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
881 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
882 }
883
884 /* keep track of the effective limit. */
885 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
886 {
887 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
888 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
889 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
890 }
891 }
892
893 /*
894 * Check if the Guest GDT intrudes on our GDT entries.
895 */
896 // RTSEL aHyperGDT[MAX_NEEDED_HYPERVISOR_GDTS];
897 if (cbEffLimit >= pVM->selm.s.SelTSSTrap08)
898 {
899#if 0
900 PVBOXDESC pGDTEStart = pVM->selm.s.paGdtHC;
901 PVBOXDESC pGDTE = (PVBOXDESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(VBOXDESC));
902 int iGDT = 0;
903
904 /* Disabling this for now; previously saw triple faults with OS/2, before fixing the above if statement */
905 Log(("Internal SELM GDT conflict: use non-present entries\n"));
906 while (pGDTE > pGDTEStart && iGDT < MAX_NEEDED_HYPERVISOR_GDTS)
907 {
908 /* We can reuse non-present entries */
909 if (!pGDTE->Gen.u1Present)
910 {
911 aHyperGDT[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtHC) / sizeof(VBOXDESC);
912 aHyperGDT[iGDT] = aHyperGDT[iGDT] << X86_SEL_SHIFT;
913 Log(("SELM: Found unused GDT %04X\n", aHyperGDT[iGDT]));
914 iGDT++;
915 }
916
917 pGDTE--;
918 }
919 if (iGDT != MAX_NEEDED_HYPERVISOR_GDTS)
920#endif
921 {
922 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
923 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
924 return VERR_NOT_IMPLEMENTED;
925 }
926 }
927
928 /*
929 * Work thru the copied GDT entries adjusting them for correct virtualization.
930 */
931 PVBOXDESC pGDTEEnd = (PVBOXDESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(VBOXDESC));
932 while (pGDTE < pGDTEEnd)
933 {
934 if (pGDTE->Gen.u1Present)
935 {
936 /*
937 * Code and data selectors are generally 1:1, with the
938 * 'little' adjustment we do for DPL 0 selectors.
939 */
940 if (pGDTE->Gen.u1DescType)
941 {
942 /*
943 * Hack for A-bit against Trap E on read-only GDT.
944 */
945 /** @todo Fix this by loading ds and cs before turning off WP. */
946 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
947
948 /*
949 * All DPL 0 code and data segments are squeezed into DPL 1.
950 *
951 * We're skipping conforming segments here because those
952 * cannot give us any trouble.
953 */
954 if ( pGDTE->Gen.u2Dpl == 0
955 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
956 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
957 pGDTE->Gen.u2Dpl = 1;
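 /* Example: a guest ring-0 flat code descriptor (execute/read, DPL=0) becomes DPL=1 in
  * the shadow GDT, so guest "ring 0" code actually executes at ring 1 under raw-mode
  * execution while the hypervisor keeps ring 0 for itself. */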
958 }
959 else
960 {
961 /*
962 * System type selectors are marked not present.
963 * Recompiler or special handling is required for these.
964 */
965 /** @todo what about interrupt gates and rawr0? */
966 pGDTE->Gen.u1Present = 0;
967 }
968 }
969
970 /* Next GDT entry. */
971 pGDTE++;
972 }
973
974#if 0 /** @todo r=bird: The relocation code won't be working right. Start with the IF below. */
975 /*
976 * Check if the Guest GDT intrudes on our GDT entries.
977 */
978 if (cbEffLimit >= pVM->selm.s.SelTSSTrap08)
979 {
980 /* Reinitialize our hypervisor GDTs */
981 pVM->selm.s.SelCS = aHyperGDT[0];
982 pVM->selm.s.SelDS = aHyperGDT[1];
983 pVM->selm.s.SelCS64 = aHyperGDT[2];
984 pVM->selm.s.SelTSS = aHyperGDT[3];
985 pVM->selm.s.SelTSSTrap08 = aHyperGDT[4];
986 SELMR3Relocate(pVM); /** @todo r=bird: Must call VMR3Relocate! */
987 }
988#endif
989
990 /*
991 * Adjust the cached GDT limit.
992 * Any GDT entries which have been removed must be cleared.
993 */
994 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
995 {
996 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
997 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
998#ifndef SELM_TRACK_GUEST_GDT_CHANGES
999 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
1000#endif
1001 }
1002
1003#ifdef SELM_TRACK_GUEST_GDT_CHANGES
1004 /*
1005 * Check if Guest's GDTR is changed.
1006 */
1007 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
1008 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1009 {
1010 Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%08X cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
1011
1012 /*
1013 * [Re]Register write virtual handler for guest's GDT.
1014 */
1015 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
1016 {
1017 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
1018 AssertRC(rc);
1019 }
1020
1021 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
1022 0, selmGuestGDTWriteHandler, "selmgcGuestGDTWriteHandler", 0, "Guest GDT write access handler");
1023 if (VBOX_FAILURE(rc))
1024 return rc;
1025
1026 /* Update saved Guest GDTR. */
1027 pVM->selm.s.GuestGdtr = GDTR;
1028 pVM->selm.s.fGDTRangeRegistered = true;
1029 }
1030#endif
1031 }
1032
1033 /*
1034 * TSS sync
1035 */
1036 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
1037 {
1038 SELMR3SyncTSS(pVM);
1039 }
1040
1041 /*
1042 * LDT sync
1043 */
1044 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_LDT))
1045 {
1046 /*
1047 * Always assume the best
1048 */
1049 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
1050
1051 /*
1052 * LDT handling is done similarly to the GDT handling with a shadow
1053 * array. However, since the LDT is expected to be swappable (at least
1054 * some ancient OSes make it swappable) it must be floating and
1055 * synced on a per-page basis.
1056 *
1057 * Eventually we will change this to be fully on demand. Meaning that
1058 * we will only sync pages containing LDT selectors actually used and
1059 * let the #PF handler lazily sync pages as they are used.
1060 * (This applies to GDT too, when we start making OS/2 fast.)
1061 */
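 /* Concretely, the loop further down reads the guest LDT one page at a time; a page that
  * is swapped out (VERR_PAGE_NOT_PRESENT / VERR_PAGE_TABLE_NOT_PRESENT) simply gets its
  * shadow counterpart marked not present so a later access can fault it in. */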
1062
1063 /*
1064 * First, determine the current LDT selector.
1065 */
1066 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1067 if ((SelLdt & X86_SEL_MASK) == 0)
1068 {
1069 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1070 CPUMSetHyperLDTR(pVM, 0);
1071#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1072 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1073 {
1074 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1075 AssertRC(rc);
1076 pVM->selm.s.GCPtrGuestLdt = ~0U;
1077 }
1078#endif
1079 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1080 return VINF_SUCCESS;
1081 }
1082
1083 /*
1084 * Get the LDT selector.
1085 */
1086 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelLdt >> X86_SEL_SHIFT];
1087 RTGCPTR GCPtrLdt = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1088 unsigned cbLdt = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1089 if (pDesc->Gen.u1Granularity)
1090 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
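 /* Worked example: a raw limit field of 0x3 with the granularity bit set expands to
  * (0x3 << 12) | 0xFFF = 0x3FFF, i.e. the limit counts 4KB pages with the low 12 bits
  * filled in. */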
1091
1092 /*
1093 * Validate it.
1094 */
1095 if ( !cbLdt
1096 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1097 || pDesc->Gen.u1DescType
1098 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1099 {
1100 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1101
1102 /* cbLdt > 0:
1103 * This is quite impossible, so we do as most people do when faced with
1104 * the impossible: we simply ignore it.
1105 */
1106 CPUMSetHyperLDTR(pVM, 0);
1107#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1108 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1109 {
1110 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1111 AssertRC(rc);
1112 pVM->selm.s.GCPtrGuestLdt = ~0U;
1113 }
1114#endif
1115 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1116 return VINF_SUCCESS;
1117 }
1118 /** @todo check what intel does about odd limits. */
1119 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1120
1121 /*
1122 * Use the cached guest ldt address if the descriptor has already been modified (see below)
1123 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1124 */
1125 if (MMHyperIsInsideArea(pVM, GCPtrLdt) == true)
1126 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1127
1128
1129#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1130 /** @todo Handle only present LDT segments. */
1131 // if (pDesc->Gen.u1Present)
1132 {
1133 /*
1134 * Check if Guest's LDT address/limit is changed.
1135 */
1136 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1137 || cbLdt != pVM->selm.s.cbLdtLimit)
1138 {
1139 Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%VGv:%04x)\n",
1140 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1141
1142 /*
1143 * [Re]Register write virtual handler for guest's GDT.
1144 * In the event of the LDT overlapping something, don't install it; just assume it's being updated.
1145 */
1146 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1147 {
1148 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1149 AssertRC(rc);
1150 }
1151#ifdef DEBUG
1152 if (pDesc->Gen.u1Present)
1153 Log(("LDT selector marked not present!!\n"));
1154#endif
1155 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1156 0, selmGuestLDTWriteHandler, "selmgcGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1157 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1158 {
1159 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1160 pVM->selm.s.GCPtrGuestLdt = ~0;
1161 Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%VGv:%04x)\n",
1162 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1163 }
1164 else if (VBOX_SUCCESS(rc))
1165 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1166 else
1167 {
1168 CPUMSetHyperLDTR(pVM, 0);
1169 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1170 return rc;
1171 }
1172
1173 pVM->selm.s.cbLdtLimit = cbLdt;
1174 }
1175 }
1176#else
1177 pVM->selm.s.cbLdtLimit = cbLdt;
1178#endif
1179
1180 /*
1181 * Calc Shadow LDT base.
1182 */
1183 unsigned off;
1184 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1185 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.GCPtrLdt + off);
1186 PVBOXDESC pShadowLDT = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1187
1188 /*
1189 * Enable the LDT selector in the shadow GDT.
1190 */
1191 pDesc->Gen.u1Present = 1;
1192 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1193 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1194 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1195 pDesc->Gen.u1Available = 0;
1196 pDesc->Gen.u1Reserved = 0;
1197 if (cbLdt > 0xffff)
1198 {
1199 cbLdt = 0xffff;
1200 pDesc->Gen.u4LimitHigh = 0;
1201 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1202 }
1203
1204 /*
1205 * Set Hyper LDTR and notify TRPM.
1206 */
1207 CPUMSetHyperLDTR(pVM, SelLdt);
1208
1209 /*
1210 * Loop synchronising the LDT page by page.
1211 */
1212 /** @todo investigate how Intel handles various operations on half-present cross-page entries. */
1213 off = GCPtrLdt & (sizeof(VBOXDESC) - 1);
1214 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1215
1216 /** @note Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
1217 unsigned cbLeft = cbLdt + 1;
1218 PVBOXDESC pLDTE = pShadowLDT;
1219 while (cbLeft)
1220 {
1221 /*
1222 * Read a chunk.
1223 */
1224 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1225 if (cbChunk > cbLeft)
1226 cbChunk = cbLeft;
1227 rc = PGMPhysReadGCPtr(pVM, pShadowLDT, GCPtrLdt, cbChunk);
1228 if (VBOX_SUCCESS(rc))
1229 {
1230 /*
1231 * Mark page
1232 */
1233 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1234 AssertRC(rc);
1235
1236 /*
1237 * Loop thru the available LDT entries.
1238 * Figuring out where to start and end, and the potential cross-pageness of
1239 * things, adds a little complexity. pLDTE is updated there and not in the
1240 * 'next' part of the loop. pLDTEEnd is inclusive.
1241 */
1242 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1243 if (pLDTE + 1 < pShadowLDT)
1244 pLDTE = (PVBOXDESC)((uintptr_t)pShadowLDT + off);
1245 while (pLDTE <= pLDTEEnd)
1246 {
1247 if (pLDTE->Gen.u1Present)
1248 {
1249 /*
1250 * Code and data selectors are generally 1:1, with the
1251 * 'little' adjustment we do for DPL 0 selectors.
1252 */
1253 if (pLDTE->Gen.u1DescType)
1254 {
1255 /*
1256 * Hack for A-bit against Trap E on read-only GDT.
1257 */
1258 /** @todo Fix this by loading ds and cs before turning off WP. */
1259 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1260 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1261
1262 /*
1263 * All DPL 0 code and data segments are squeezed into DPL 1.
1264 *
1265 * We're skipping conforming segments here because those
1266 * cannot give us any trouble.
1267 */
1268 if ( pLDTE->Gen.u2Dpl == 0
1269 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1270 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1271 pLDTE->Gen.u2Dpl = 1;
1272 }
1273 else
1274 {
1275 /*
1276 * System type selectors are marked not present.
1277 * Recompiler or special handling is required for these.
1278 */
1279 /** @todo what about interrupt gates and rawr0? */
1280 pLDTE->Gen.u1Present = 0;
1281 }
1282 }
1283
1284 /* Next LDT entry. */
1285 pLDTE++;
1286 }
1287 }
1288 else
1289 {
1290 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%d\n", rc));
1291 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1292 AssertRC(rc);
1293 }
1294
1295 /*
1296 * Advance to the next page.
1297 */
1298 cbLeft -= cbChunk;
1299 GCPtrShadowLDT += cbChunk;
1300 pShadowLDT = (PVBOXDESC)((char *)pShadowLDT + cbChunk);
1301 GCPtrLdt += cbChunk;
1302 }
1303 }
1304
1305 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1306 return VINF_SUCCESS;
1307}
1308
1309
1310/**
1311 * \#PF Handler callback for virtual access handler ranges.
1312 *
1313 * Important to realize that a physical page in a range can have aliases, and
1314 * for ALL and WRITE handlers these will also trigger.
1315 *
1316 * @returns VINF_SUCCESS if the handler has carried out the operation.
1317 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1318 * @param pVM VM Handle.
1319 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1320 * @param pvPtr The HC mapping of that address.
1321 * @param pvBuf What the guest is reading/writing.
1322 * @param cbBuf How much it's reading/writing.
1323 * @param enmAccessType The access type.
1324 * @param pvUser User argument.
1325 */
1326static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1327{
1328 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1329 Log(("selmGuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1330 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
1331
1332 return VINF_PGM_HANDLER_DO_DEFAULT;
1333}
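/* Note that all three guest write handlers (GDT, LDT and TSS) are deliberately cheap: they
 * only raise the corresponding VM_FF_SELM_SYNC_* force-flag and let PGM carry out the write,
 * deferring the actual resync to SELMR3UpdateFromCPUM() / SELMR3SyncTSS() on the next
 * raw-mode entry. */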
1334
1335/**
1336 * \#PF Handler callback for virtual access handler ranges.
1337 *
1338 * Important to realize that a physical page in a range can have aliases, and
1339 * for ALL and WRITE handlers these will also trigger.
1340 *
1341 * @returns VINF_SUCCESS if the handler has carried out the operation.
1342 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1343 * @param pVM VM Handle.
1344 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1345 * @param pvPtr The HC mapping of that address.
1346 * @param pvBuf What the guest is reading/writing.
1347 * @param cbBuf How much it's reading/writing.
1348 * @param enmAccessType The access type.
1349 * @param pvUser User argument.
1350 */
1351static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1352{
1353 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1354 Log(("selmGuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1355 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
1356 return VINF_PGM_HANDLER_DO_DEFAULT;
1357}
1358
1359/**
1360 * \#PF Handler callback for virtual access handler ranges.
1361 *
1362 * Important to realize that a physical page in a range can have aliases, and
1363 * for ALL and WRITE handlers these will also trigger.
1364 *
1365 * @returns VINF_SUCCESS if the handler has carried out the operation.
1366 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1367 * @param pVM VM Handle.
1368 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1369 * @param pvPtr The HC mapping of that address.
1370 * @param pvBuf What the guest is reading/writing.
1371 * @param cbBuf How much it's reading/writing.
1372 * @param enmAccessType The access type.
1373 * @param pvUser User argument.
1374 */
1375static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1376{
1377 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1378 Log(("selmGuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1379 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1380 return VINF_PGM_HANDLER_DO_DEFAULT;
1381}
1382
1383/**
1384 * Check if the TSS ring 0 stack selector and pointer were updated (for now)
1385 *
1386 * @returns VBox status code.
1387 * @param pVM The VM to operate on.
1388 */
1389SELMR3DECL(int) SELMR3SyncTSS(PVM pVM)
1390{
1391 int rc;
1392
1393 if (pVM->selm.s.fDisableMonitoring)
1394 {
1395 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1396 return VINF_SUCCESS;
1397 }
1398
1399/** @todo r=bird: SELMR3SyncTSS should be VMMAll code.
1400 * All the base, size, flags and stuff must be kept up to date in the CPUM tr register.
1401 */
1402 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1403
1404 Assert(!VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT));
1405 Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS));
1406
1407 /*
1408 * TSS sync
1409 */
1410 RTSEL SelTss = CPUMGetGuestTR(pVM);
1411 if (SelTss & X86_SEL_MASK)
1412 {
1413 /** @todo r=bird: strictly speaking, this is wrong as we shouldn't bother with changes to
1414 * the TSS selector once it's loaded. There are a bunch of problems of this kind (see Sander's
1415 * comment in the unzip defect).
1416 * The first part here should only be done when we're loading TR. The latter part, which is
1417 * updating the ss0:esp0 pair, can be done by the access handler now since we can trap all
1418 * accesses, including REM ones. */
1419
1420 /*
1421 * Guest TR is not NULL.
1422 */
1423 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1424 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1425 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1426 if (pDesc->Gen.u1Granularity)
1427 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1428 cbTss++;
1429 pVM->selm.s.cbGuestTss = cbTss;
1430 pVM->selm.s.fGuestTss32Bit = pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1431 || pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
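 /* For reference, the system TSS descriptor types are 1 (286 available), 3 (286 busy),
  * 9 (386 available) and 11 (386 busy); only the 386 variants use the 32-bit ss0:esp0
  * layout that VBOXTSS assumes. */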
1432
1433 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1434 if (cbTss > sizeof(VBOXTSS))
1435 cbTss = sizeof(VBOXTSS);
1436 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1437 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1438
1439 // All system GDT entries are marked not present above. That explains why this check fails.
1440 //if (pDesc->Gen.u1Present)
1441 /** @todo Handle only present TSS segments. */
1442 {
1443 /*
1444 * Check if Guest's TSS is changed.
1445 */
1446 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1447 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1448 {
1449 Log(("SELMR3UpdateFromCPUM: Guest's TSS is changed to pTss=%08X cbTss=%08X cbGuestTss\n", GCPtrTss, cbTss, pVM->selm.s.cbGuestTss));
1450
1451 /*
1452 * Validate it.
1453 */
1454 if ( SelTss & X86_SEL_LDT
1455 || !cbTss
1456 || SelTss >= pVM->selm.s.GuestGdtr.cbGdt
1457 || pDesc->Gen.u1DescType
1458 || ( pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1459 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
1460 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1461 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) )
1462 {
1463 AssertMsgFailed(("Invalid Guest TSS %04x!\n", SelTss));
1464 }
1465 else
1466 {
1467 /*
1468 * [Re]Register write virtual handler for guest's TSS.
1469 */
1470 if (pVM->selm.s.GCPtrGuestTss != ~0U)
1471 {
1472 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1473 AssertRC(rc);
1474 }
1475
1476 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1,
1477 0, selmGuestTSSWriteHandler, "selmgcGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1478 if (VBOX_FAILURE(rc))
1479 {
1480 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1481 return rc;
1482 }
1483
1484 /* Update saved Guest TSS info. */
1485 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1486 pVM->selm.s.cbMonitoredGuestTss = cbTss;
1487 pVM->selm.s.GCSelTss = SelTss;
1488 }
1489 }
1490
1491 /* Update the ring 0 stack selector and base address */
1492 /* feeling very lazy; reading too much */
1493 VBOXTSS tss;
1494 rc = PGMPhysReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));
1495 if (VBOX_SUCCESS(rc))
1496 {
1497 #ifdef DEBUG
1498 uint32_t ssr0, espr0;
1499
1500 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1501 ssr0 &= ~1;
1502
1503 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1504 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1505 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1506 #endif
1507 /* Update our TSS structure for the guest's ring 1 stack */
1508 SELMSetRing1Stack(pVM, tss.ss0 | 1, tss.esp0);
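 /* The "| 1" forces RPL 1 onto the guest's ring-0 stack selector, matching the DPL 0 -> 1
  * squeeze applied to the shadow descriptors; the strict check in SELMR3CheckTSS() masks
  * the bit off again ("& ~1") before comparing. */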
1509 }
1510 else
1511 {
1512 /** @note the ring 0 stack selector and base address are updated on demand in this case. */
1513
1514 /** @todo handle these dependencies better! */
1515 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER);
1516 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER);
1517 pVM->selm.s.fSyncTSSRing0Stack = true;
1518 }
1519 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1520 }
1521 }
1522
1523 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1524 return VINF_SUCCESS;
1525}
1526
1527
1528/**
1529 * Compares the Guest GDT and LDT with the shadow tables.
1530 * This is a VBOX_STRICT only function.
1531 *
1532 * @returns VBox status code.
1533 * @param pVM The VM Handle.
1534 */
1535SELMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1536{
1537#ifdef VBOX_STRICT
1538 /*
1539 * Get GDTR and check for conflict.
1540 */
1541 VBOXGDTR GDTR;
1542 CPUMGetGuestGDTR(pVM, &GDTR);
1543 if (GDTR.cbGdt == 0)
1544 return VINF_SUCCESS;
1545
1546#if 0
1547 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.SelTSSTrap08 >> X86_SEL_SHIFT))
1548 {
1549 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
1550 return VERR_NOT_IMPLEMENTED;
1551 }
1552#endif
1553
1554 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1555 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1556
1557 /*
1558 * Loop thru the GDT checking each entry.
1559 */
1560 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1561 PVBOXDESC pGDTE = pVM->selm.s.paGdtHC;
1562 PVBOXDESC pGDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1563 while (pGDTE < pGDTEEnd)
1564 {
1565 VBOXDESC GDTEGuest;
1566 int rc = PGMPhysReadGCPtr(pVM, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1567 if (VBOX_SUCCESS(rc))
1568 {
1569 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1570 {
1571 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1572 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1573 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1574 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1575 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1576 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1577 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1578 {
1579 unsigned iGDT = pGDTE - pVM->selm.s.paGdtHC;
1580 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1581 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1582 }
1583 }
1584 }
1585
1586 /* Advance to the next descriptor. */
1587 GCPtrGDTEGuest += sizeof(VBOXDESC);
1588 pGDTE++;
1589 }
1590
1591
1592 /*
1593 * LDT?
1594 */
1595 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1596 if ((SelLdt & X86_SEL_MASK) == 0)
1597 return VINF_SUCCESS;
1598 if (SelLdt > GDTR.cbGdt)
1599 {
1600 Log(("SELMR3DebugCheck: ldt is out of bound SelLdt=%#x\n", SelLdt));
1601 return VERR_INTERNAL_ERROR;
1602 }
1603 VBOXDESC LDTDesc;
1604 int rc = PGMPhysReadGCPtr(pVM, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1605 if (VBOX_FAILURE(rc))
1606 {
1607 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1608 return rc;
1609 }
1610 RTGCPTR GCPtrLDTEGuest = LDTDesc.Gen.u16BaseLow | (LDTDesc.Gen.u8BaseHigh1 << 16) | (LDTDesc.Gen.u8BaseHigh2 << 24);
1611 unsigned cbLdt = LDTDesc.Gen.u16LimitLow | (LDTDesc.Gen.u4LimitHigh << 16);
1612 if (LDTDesc.Gen.u1Granularity)
1613 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1614
1615 /*
1616 * Validate it.
1617 */
1618 if (!cbLdt)
1619 return VINF_SUCCESS;
1620 /** @todo check what intel does about odd limits. */
1621 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1622 if ( LDTDesc.Gen.u1DescType
1623 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1624 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1625 {
1626 Log(("SELmR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1627 return VERR_INTERNAL_ERROR;
1628 }
1629
1630 /*
1631 * Loop thru the LDT checking each entry.
1632 */
1633 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1634 PVBOXDESC pLDTE = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1635 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pLDTE + cbLdt);
1636 while (pLDTE < pLDTEEnd)
1637 {
1638 VBOXDESC LDTEGuest;
1639 int rc = PGMPhysReadGCPtr(pVM, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1640 if (VBOX_SUCCESS(rc))
1641 {
1642 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1643 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1644 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1645 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1646 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1647 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1648 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1649 {
1650 unsigned iLDT = pLDTE - (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1651 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1652 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1653 }
1654 }
1655
1656 /* Advance to the next descriptor. */
1657 GCPtrLDTEGuest += sizeof(VBOXDESC);
1658 pLDTE++;
1659 }
1660
1661#else
1662 NOREF(pVM);
1663#endif
1664
1665 return VINF_SUCCESS;
1666}
1667
1668
1669/**
1670 * Validates the RawR0 TSS values against the one in the Guest TSS.
1671 *
1672 * @returns true if it matches.
1673 * @returns false and raises assertions on mismatch.
1674 * @param pVM VM Handle.
1675 */
1676SELMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
1677{
1678#ifdef VBOX_STRICT
1679
1680 RTSEL SelTss = CPUMGetGuestTR(pVM);
1681 if (SelTss & X86_SEL_MASK)
1682 {
1683 AssertMsg((SelTss & X86_SEL_MASK) == (pVM->selm.s.GCSelTss & X86_SEL_MASK), ("New TSS selector = %04X, old TSS selector = %04X\n", SelTss, pVM->selm.s.GCSelTss));
1684
1685 /*
1686 * Guest TR is not NULL.
1687 */
1688 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1689 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1690 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1691 if (pDesc->Gen.u1Granularity)
1692 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1693 cbTss++;
1694 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1695 if (cbTss > sizeof(VBOXTSS))
1696 cbTss = sizeof(VBOXTSS);
1697 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1698 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1699
1700        // All system descriptors are marked not present above. That explains why this check fails.
1701 //if (pDesc->Gen.u1Present)
1702 /** @todo Handle only present TSS segments. */
1703 {
1704 /*
1705 * Check if Guest's TSS was changed.
1706 */
1707 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1708 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1709 {
1710 AssertMsgFailed(("Guest's TSS (Sel 0x%X) is changed from %RGv:%04x to %RGv:%04x\n",
1711 SelTss, pVM->selm.s.GCPtrGuestTss, pVM->selm.s.cbMonitoredGuestTss,
1712 GCPtrTss, cbTss));
1713 }
1714 }
1715 }
1716
1717 if (!pVM->selm.s.fSyncTSSRing0Stack)
1718 {
1719 RTGCPTR pGuestTSS = pVM->selm.s.GCPtrGuestTss;
1720 uint32_t ESPR0;
1721 int rc = PGMPhysReadGCPtr(pVM, &ESPR0, pGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
1722 if (VBOX_SUCCESS(rc))
1723 {
1724 RTSEL SelSS0;
1725 rc = PGMPhysReadGCPtr(pVM, &SelSS0, pGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
1726 if (VBOX_SUCCESS(rc))
1727 {
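                /*
                 * Under raw-mode the guest's ring-0 code runs in ring 1, so the shadow
                 * TSS keeps the guest's ring-0 stack (ss0:esp0) in its ring-1 fields
                 * with the RPL of the stack selector raised to 1 - hence the compare
                 * against esp1/ss1 and the masking of the low RPL bit below.
                 */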
1728 if ( ESPR0 == pVM->selm.s.Tss.esp1
1729 && SelSS0 == (pVM->selm.s.Tss.ss1 & ~1))
1730 return true;
1731
1732 RTGCPHYS GCPhys;
1733 uint64_t fFlags;
1734
1735 rc = PGMGstGetPage(pVM, pGuestTSS, &fFlags, &GCPhys);
1736 AssertRC(rc);
1737 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%VGv Phys=%VGp\n",
1738 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, pGuestTSS, GCPhys));
1739 }
1740 else
1741 AssertRC(rc);
1742 }
1743 else
1744 /* Happens during early Windows XP boot when it is switching page tables. */
1745 Assert(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF)));
1746 }
1747 return false;
1748#else
1749 NOREF(pVM);
1750 return true;
1751#endif
1752}
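/*
 * A hypothetical call-site sketch: the checking is only compiled in for
 * VBOX_STRICT builds and the function returns true otherwise, so a caller
 * would typically just assert on the result, e.g.:
 *
 *      Assert(SELMR3CheckTSS(pVM));
 */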
1753
1754
1755/**
1756 * Returns the flat address and limit of the LDT identified by the given LDT
1757 * selector, looked up in the guest GDT.
1758 * Fully validates the selector.
1759 *
1760 * @returns VBox status.
1761 * @param pVM VM Handle.
1762 * @param SelLdt LDT selector.
1763 * @param ppvLdt Where to store the flat address of LDT.
1764 * @param pcbLimit Where to store LDT limit.
1765 */
1766SELMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1767{
1768 /* Get guest GDTR. */
1769 VBOXGDTR GDTR;
1770 CPUMGetGuestGDTR(pVM, &GDTR);
1771
1772 /* Check selector TI and GDT limit. */
1773 if ( SelLdt & X86_SEL_LDT
1774 || (SelLdt > GDTR.cbGdt))
1775 return VERR_INVALID_SELECTOR;
1776
1777 /* Read descriptor from GC. */
1778 VBOXDESC Desc;
1779 int rc = PGMPhysReadGCPtr(pVM, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1780 if (VBOX_FAILURE(rc))
1781 {
1782 /* fatal */
1783 AssertMsgFailed(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1784 return VERR_SELECTOR_NOT_PRESENT;
1785 }
1786
1787 /* Check if LDT descriptor is not present. */
1788 if (Desc.Gen.u1Present == 0)
1789 return VERR_SELECTOR_NOT_PRESENT;
1790
1791 /* Check LDT descriptor type. */
1792 if ( Desc.Gen.u1DescType == 1
1793 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1794 return VERR_INVALID_SELECTOR;
1795
1796 /* LDT descriptor is ok. */
1797 if (ppvLdt)
1798 {
1799 *ppvLdt = (RTGCPTR)( (Desc.Gen.u8BaseHigh2 << 24)
1800 | (Desc.Gen.u8BaseHigh1 << 16)
1801 | Desc.Gen.u16BaseLow);
1802 *pcbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1803 }
1804 return VINF_SUCCESS;
1805}
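/*
 * A minimal usage sketch (hypothetical caller; assumes a valid pVM): resolve
 * the current guest LDT from the LDTR and log it.  Only APIs that already
 * appear in this file are used.
 */
#if 0 /* illustrative sketch */
static void selmExampleLogGuestLdt(PVM pVM)
{
    RTGCPTR  GCPtrLdt;
    unsigned cbLimit;
    int rc = SELMGetLDTFromSel(pVM, CPUMGetGuestLDTR(pVM), &GCPtrLdt, &cbLimit);
    if (VBOX_SUCCESS(rc))
        Log(("Guest LDT at %VGv, limit %#x\n", GCPtrLdt, cbLimit));
    else
        Log(("SELMGetLDTFromSel failed with rc=%Vrc\n", rc));
}
#endif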
1806
1807
1808/**
1809 * Gets information about a selector.
1810 * Mostly intended for the debugger; prefers the guest
1811 * descriptor tables over the shadow ones.
1812 *
1813 * @returns VINF_SUCCESS on success.
1814 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1815 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1816 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1817 * backing the selector table wasn't present.
1818 * @returns Other VBox status code on other errors.
1819 *
1820 * @param pVM VM handle.
1821 * @param Sel The selector to get info about.
1822 * @param pSelInfo Where to store the information.
1823 */
1824SELMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1825{
1826 Assert(pSelInfo);
1827
1828 /*
1829 * Read the descriptor entry
1830 */
1831 VBOXDESC Desc;
1832 if ( !(Sel & X86_SEL_LDT)
1833 && ( pVM->selm.s.SelCS == (Sel & X86_SEL_MASK)
1834 || pVM->selm.s.SelDS == (Sel & X86_SEL_MASK)
1835 || pVM->selm.s.SelCS64 == (Sel & X86_SEL_MASK)
1836 || pVM->selm.s.SelTSS == (Sel & X86_SEL_MASK)
1837 || pVM->selm.s.SelTSSTrap08 == (Sel & X86_SEL_MASK))
1838 )
1839 {
1840 /*
1841 * Hypervisor descriptor.
1842 */
1843 pSelInfo->fHyper = true;
1844 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1845 }
1846 else if (CPUMIsGuestInProtectedMode(pVM))
1847 {
1848 /*
1849 * Read it from the guest descriptor table.
1850 */
1851 pSelInfo->fHyper = false;
1852
1853 VBOXGDTR Gdtr;
1854 RTGCPTR GCPtrDesc;
1855 CPUMGetGuestGDTR(pVM, &Gdtr);
1856 if (!(Sel & X86_SEL_LDT))
1857 {
1858 /* GDT */
1859 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
1860 return VERR_INVALID_SELECTOR;
1861 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
1862 }
1863 else
1864 {
1865 /*
1866 * LDT - must locate the LDT first...
1867 */
1868 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1869 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(VBOXDESC) /* the first selector is invalid, right? */
1870 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
1871 return VERR_INVALID_SELECTOR;
1872 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
1873 int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
1874 if (VBOX_FAILURE(rc))
1875 return rc;
1876
1877 /* validate the LDT descriptor. */
1878 if (Desc.Gen.u1Present == 0)
1879 return VERR_SELECTOR_NOT_PRESENT;
1880 if ( Desc.Gen.u1DescType == 1
1881 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1882 return VERR_INVALID_SELECTOR;
1883
1884 unsigned cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1885 if (Desc.Gen.u1Granularity)
1886 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1887 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > cbLimit)
1888 return VERR_INVALID_SELECTOR;
1889
1890 /* calc the descriptor location. */
1891 GCPtrDesc = (Desc.Gen.u8BaseHigh2 << 24)
1892 | (Desc.Gen.u8BaseHigh1 << 16)
1893 | Desc.Gen.u16BaseLow;
1894 GCPtrDesc += (Sel & X86_SEL_MASK);
1895 }
1896
1897 /* read the descriptor. */
1898 int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
1899 if (VBOX_FAILURE(rc))
1900 return rc;
1901 }
1902 else
1903 {
1904 /*
1905 * We're in real mode.
1906 */
1907 pSelInfo->Sel = Sel;
1908 pSelInfo->GCPtrBase = Sel << 4;
1909 pSelInfo->cbLimit = 0xffff;
1910 pSelInfo->fHyper = false;
1911 pSelInfo->fRealMode = true;
1912 memset(&pSelInfo->Raw, 0, sizeof(pSelInfo->Raw));
1913 return VINF_SUCCESS;
1914 }
1915
1916 /*
1917 * Extract the base and limit
1918 */
1919 pSelInfo->Sel = Sel;
1920 pSelInfo->Raw = Desc;
1921 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1922 if (Desc.Gen.u1Granularity)
1923 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1924 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1925 | (Desc.Gen.u8BaseHigh1 << 16)
1926 | Desc.Gen.u16BaseLow;
1927 pSelInfo->fRealMode = false;
1928
1929 return VINF_SUCCESS;
1930}
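/*
 * A minimal usage sketch (hypothetical debugger-style caller; assumes a valid
 * pVM): look up a selector and log its base and limit using the SELMSELINFO
 * fields filled in above.
 */
#if 0 /* illustrative sketch */
static void selmExampleLogSelector(PVM pVM, RTSEL Sel)
{
    SELMSELINFO SelInfo;
    int rc = SELMR3GetSelectorInfo(pVM, Sel, &SelInfo);
    if (VBOX_SUCCESS(rc))
        Log(("Sel=%04x base=%VGv limit=%#x %s%s\n", SelInfo.Sel, SelInfo.GCPtrBase, SelInfo.cbLimit,
             SelInfo.fHyper ? "hyper " : "", SelInfo.fRealMode ? "realmode" : ""));
    else
        Log(("SELMR3GetSelectorInfo failed with rc=%Vrc\n", rc));
}
#endif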
1931
1932
1933/**
1934 * Gets information about a selector from the shadow tables.
1935 *
1936 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but it
1937 * requires the caller to ensure that the shadow tables are up to date.
1938 *
1939 * @returns VINF_SUCCESS on success.
1940 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1941 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1942 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1943 * backing the selector table wasn't present.
1944 * @returns Other VBox status code on other errors.
1945 *
1946 * @param pVM VM handle.
1947 * @param Sel The selector to get info about.
1948 * @param pSelInfo Where to store the information.
1949 */
1950SELMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1951{
1952 Assert(pSelInfo);
1953
1954 /*
1955 * Read the descriptor entry
1956 */
1957 VBOXDESC Desc;
1958 if (!(Sel & X86_SEL_LDT))
1959 {
1960 /*
1961 * Global descriptor.
1962 */
1963 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1964 pSelInfo->fHyper = pVM->selm.s.SelCS == (Sel & X86_SEL_MASK)
1965 || pVM->selm.s.SelDS == (Sel & X86_SEL_MASK)
1966 || pVM->selm.s.SelCS64 == (Sel & X86_SEL_MASK)
1967 || pVM->selm.s.SelTSS == (Sel & X86_SEL_MASK)
1968 || pVM->selm.s.SelTSSTrap08 == (Sel & X86_SEL_MASK);
1969 /** @todo check that the GDT offset is valid. */
1970 }
1971 else
1972 {
1973 /*
1974 * Local Descriptor.
1975 */
1976 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
1977 Desc = paLDT[Sel >> X86_SEL_SHIFT];
1978 /** @todo check if the LDT page is actually available. */
1979 /** @todo check that the LDT offset is valid. */
1980 pSelInfo->fHyper = false;
1981 }
1982
1983 /*
1984 * Extract the base and limit
1985 */
1986 pSelInfo->Sel = Sel;
1987 pSelInfo->Raw = Desc;
1988 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1989 if (Desc.Gen.u1Granularity)
1990 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1991 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1992 | (Desc.Gen.u8BaseHigh1 << 16)
1993 | Desc.Gen.u16BaseLow;
1994 pSelInfo->fRealMode = false;
1995
1996 return VINF_SUCCESS;
1997}
1998
1999
2000/**
2001 * Formats a descriptor.
2002 *
2003 * @param Desc Descriptor to format.
2004 * @param Sel Selector number.
2005 * @param pszOutput Output buffer.
2006 * @param cchOutput Size of output buffer.
2007 */
2008static void selmR3FormatDescriptor(VBOXDESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
2009{
2010 /*
2011 * Make variable description string.
2012 */
2013 static struct
2014 {
2015 unsigned cch;
2016 const char *psz;
2017 } const aTypes[32] =
2018 {
2019 #define STRENTRY(str) { sizeof(str) - 1, str }
2020 /* system */
2021 STRENTRY("Reserved0 "), /* 0x00 */
2022 STRENTRY("TSS16Avail "), /* 0x01 */
2023 STRENTRY("LDT "), /* 0x02 */
2024 STRENTRY("TSS16Busy "), /* 0x03 */
2025 STRENTRY("Call16 "), /* 0x04 */
2026 STRENTRY("Task "), /* 0x05 */
2027 STRENTRY("Int16 "), /* 0x06 */
2028 STRENTRY("Trap16 "), /* 0x07 */
2029 STRENTRY("Reserved8 "), /* 0x08 */
2030 STRENTRY("TSS32Avail "), /* 0x09 */
2031 STRENTRY("ReservedA "), /* 0x0a */
2032 STRENTRY("TSS32Busy "), /* 0x0b */
2033 STRENTRY("Call32 "), /* 0x0c */
2034 STRENTRY("ReservedD "), /* 0x0d */
2035 STRENTRY("Int32 "), /* 0x0e */
2036 STRENTRY("Trap32 "), /* 0x0f */
2037 /* non system */
2038 STRENTRY("DataRO "), /* 0x10 */
2039 STRENTRY("DataRO Accessed "), /* 0x11 */
2040 STRENTRY("DataRW "), /* 0x12 */
2041 STRENTRY("DataRW Accessed "), /* 0x13 */
2042 STRENTRY("DataDownRO "), /* 0x14 */
2043 STRENTRY("DataDownRO Accessed "), /* 0x15 */
2044 STRENTRY("DataDownRW "), /* 0x16 */
2045 STRENTRY("DataDownRW Accessed "), /* 0x17 */
2046 STRENTRY("CodeEO "), /* 0x18 */
2047 STRENTRY("CodeEO Accessed "), /* 0x19 */
2048 STRENTRY("CodeER "), /* 0x1a */
2049 STRENTRY("CodeER Accessed "), /* 0x1b */
2050 STRENTRY("CodeConfEO "), /* 0x1c */
2051 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2052 STRENTRY("CodeConfER "), /* 0x1e */
2053 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2054    #undef STRENTRY
2055 };
2056 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2057 char szMsg[128];
2058 char *psz = &szMsg[0];
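    /* Index into aTypes: the descriptor-type (S) bit selects the system half
       (0x00..0x0f) or the code/data half (0x10..0x1f); the 4-bit type field
       selects the entry within that half. */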
2059 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2060 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2061 psz += aTypes[i].cch;
2062
2063 if (Desc.Gen.u1Present)
2064 ADD_STR(psz, "Present ");
2065 else
2066 ADD_STR(psz, "Not-Present ");
2067 if (Desc.Gen.u1Granularity)
2068 ADD_STR(psz, "Page ");
2069 if (Desc.Gen.u1DefBig)
2070 ADD_STR(psz, "32-bit ");
2071 else
2072 ADD_STR(psz, "16-bit ");
2073 #undef ADD_STR
2074 *psz = '\0';
2075
2076 /*
2077 * Limit and Base and format the output.
2078 */
2079 uint32_t u32Limit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
2080 if (Desc.Gen.u1Granularity)
2081 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2082 uint32_t u32Base = Desc.Gen.u8BaseHigh2 << 24 | Desc.Gen.u8BaseHigh1 << 16 | Desc.Gen.u16BaseLow;
2083
2084 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2085 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2086}
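/*
 * For reference, a flat ring-0 32-bit code descriptor (low dword 0000ffff,
 * high dword 00cf9a00) at selector 0008 would be formatted by the routine
 * above roughly as:
 *
 *      0008 - 0000ffff 00cf9a00 - base=00000000 limit=ffffffff dpl=0 CodeER Present Page 32-bit
 *
 * (The type column is blank-padded, so the exact spacing differs.)
 */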
2087
2088
2089/**
2090 * Dumps a descriptor.
2091 *
2092 * @param Desc Descriptor to dump.
2093 * @param Sel Selector number.
2094 * @param pszMsg Message to prepend the log entry with.
2095 */
2096SELMR3DECL(void) SELMR3DumpDescriptor(VBOXDESC Desc, RTSEL Sel, const char *pszMsg)
2097{
2098 char szOutput[128];
2099 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2100 Log(("%s: %s\n", pszMsg, szOutput));
2101 NOREF(szOutput[0]);
2102}
2103
2104
2105/**
2106 * Display the shadow GDT.
2107 *
2108 * @param pVM VM Handle.
2109 * @param pHlp The info helpers.
2110 * @param pszArgs Arguments, ignored.
2111 */
2112static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2113{
2114 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtHC));
2115 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2116 {
2117 if (pVM->selm.s.paGdtHC[iGDT].Gen.u1Present)
2118 {
2119 char szOutput[128];
2120 selmR3FormatDescriptor(pVM->selm.s.paGdtHC[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2121 const char *psz = "";
2122 if (iGDT == ((unsigned)pVM->selm.s.SelCS >> X86_SEL_SHIFT))
2123 psz = " HyperCS";
2124 else if (iGDT == ((unsigned)pVM->selm.s.SelDS >> X86_SEL_SHIFT))
2125 psz = " HyperDS";
2126 else if (iGDT == ((unsigned)pVM->selm.s.SelCS64 >> X86_SEL_SHIFT))
2127 psz = " HyperCS64";
2128 else if (iGDT == ((unsigned)pVM->selm.s.SelTSS >> X86_SEL_SHIFT))
2129 psz = " HyperTSS";
2130 else if (iGDT == ((unsigned)pVM->selm.s.SelTSSTrap08 >> X86_SEL_SHIFT))
2131 psz = " HyperTSSTrap08";
2132 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2133 }
2134 }
2135}
2136
2137
2138/**
2139 * Display the guest GDT.
2140 *
2141 * @param pVM VM Handle.
2142 * @param pHlp The info helpers.
2143 * @param pszArgs Arguments, ignored.
2144 */
2145static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2146{
2147 VBOXGDTR GDTR;
2148 CPUMGetGuestGDTR(pVM, &GDTR);
2149 RTGCPTR pGDTGC = (RTGCPTR)GDTR.pGdt;
2150 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(VBOXDESC);
2151
2152 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", pGDTGC, GDTR.cbGdt);
2153 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, pGDTGC += sizeof(VBOXDESC))
2154 {
2155 VBOXDESC GDTE;
2156 int rc = PGMPhysReadGCPtr(pVM, &GDTE, pGDTGC, sizeof(GDTE));
2157 if (VBOX_SUCCESS(rc))
2158 {
2159 if (GDTE.Gen.u1Present)
2160 {
2161 char szOutput[128];
2162 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2163 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2164 }
2165 }
2166 else if (rc == VERR_PAGE_NOT_PRESENT)
2167 {
2168 if ((pGDTGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2169                pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, pGDTGC);
2170 }
2171 else
2172            pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Vrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, pGDTGC);
2173 }
2174}
2175
2176
2177/**
2178 * Display the shadow LDT.
2179 *
2180 * @param pVM VM Handle.
2181 * @param pHlp The info helpers.
2182 * @param pszArgs Arguments, ignored.
2183 */
2184static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2185{
2186 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2187 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
2188 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2189 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2190 {
2191 if (paLDT[iLDT].Gen.u1Present)
2192 {
2193 char szOutput[128];
2194 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2195 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2196 }
2197 }
2198}
2199
2200
2201/**
2202 * Display the guest LDT.
2203 *
2204 * @param pVM VM Handle.
2205 * @param pHlp The info helpers.
2206 * @param pszArgs Arguments, ignored.
2207 */
2208static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2209{
2210 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
2211 if (!(SelLdt & X86_SEL_MASK))
2212 {
2213 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2214 return;
2215 }
2216
2217 RTGCPTR pLdtGC;
2218 unsigned cbLdt;
2219 int rc = SELMGetLDTFromSel(pVM, SelLdt, &pLdtGC, &cbLdt);
2220 if (VBOX_FAILURE(rc))
2221 {
2222 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Vrc\n", SelLdt, rc);
2223 return;
2224 }
2225
2226 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, pLdtGC, cbLdt);
2227 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2228 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, pLdtGC += sizeof(VBOXDESC))
2229 {
2230 VBOXDESC LdtE;
2231 int rc = PGMPhysReadGCPtr(pVM, &LdtE, pLdtGC, sizeof(LdtE));
2232 if (VBOX_SUCCESS(rc))
2233 {
2234 if (LdtE.Gen.u1Present)
2235 {
2236 char szOutput[128];
2237 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2238 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2239 }
2240 }
2241 else if (rc == VERR_PAGE_NOT_PRESENT)
2242 {
2243 if ((pLdtGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2244                pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, pLdtGC);
2245 }
2246 else
2247            pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Vrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, pLdtGC);
2248 }
2249}
2250
2251
2252/**
2253 * Dumps the hypervisor GDT
2254 *
2255 * @param pVM VM handle.
2256 */
2257SELMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
2258{
2259 DBGFR3Info(pVM, "gdt", NULL, NULL);
2260}
2261
2262/**
2263 * Dumps the hypervisor LDT
2264 *
2265 * @param pVM VM handle.
2266 */
2267SELMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
2268{
2269 DBGFR3Info(pVM, "ldt", NULL, NULL);
2270}
2271
2272/**
2273 * Dumps the guest GDT
2274 *
2275 * @param pVM VM handle.
2276 */
2277SELMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
2278{
2279 DBGFR3Info(pVM, "gdtguest", NULL, NULL);
2280}
2281
2282/**
2283 * Dumps the guest LDT
2284 *
2285 * @param pVM VM handle.
2286 */
2287SELMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
2288{
2289 DBGFR3Info(pVM, "ldtguest", NULL, NULL);
2290}
2291