VirtualBox

source: vbox/trunk/src/VBox/VMM/SELM.cpp@ 28

Last change on this file since 28 was 23, checked in by vboxsync, 18 years ago

string.h & stdio.h + header cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 81.4 KB
Line 
1/* $Id: SELM.cpp 23 2007-01-15 14:08:28Z vboxsync $ */
2/** @file
3 * SELM - The Selector manager.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_SELM
26#include <VBox/selm.h>
27#include <VBox/cpum.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pdm.h>
31#include <VBox/pgm.h>
32#include <VBox/trpm.h>
33#include <VBox/dbgf.h>
34#include "SELMInternal.h"
35#include <VBox/vm.h>
36#include <VBox/err.h>
37#include <VBox/param.h>
38
39#include <iprt/assert.h>
40#include <VBox/log.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/string.h>
45#include "x86context.h"
46
47
48/**
49 * Enable or disable tracking of Guest's GDT/LDT/TSS.
50 * @{
51 */
52#define SELM_TRACK_GUEST_GDT_CHANGES
53#define SELM_TRACK_GUEST_LDT_CHANGES
54#define SELM_TRACK_GUEST_TSS_CHANGES
55/** @} */
56
57/**
58 * Enable or disable tracking of Shadow GDT/LDT/TSS.
59 * @{
60 */
61#define SELM_TRACK_SHADOW_GDT_CHANGES
62#define SELM_TRACK_SHADOW_LDT_CHANGES
63#define SELM_TRACK_SHADOW_TSS_CHANGES
64/** @} */
65
66
67/** SELM saved state version. */
68#define SELM_SAVED_STATE_VERSION 4
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
74static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
75static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
76static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
77static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
78static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
79//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
80//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
81static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
82static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
83static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
84
85
86
/**
 * Initializes the SELM.
 *
 * Allocates the shadow GDT and LDT on the hypervisor heap, places the five
 * hypervisor selectors at the very top of the shadow GDT, registers the
 * saved-state unit, statistics and debugger info handlers, and flags a full
 * GDT/LDT/TSS sync for the first raw-mode entry.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 */
SELMR3DECL(int) SELMR3Init(PVM pVM)
{
    LogFlow(("SELMR3Init\n"));

    /*
     * Assert alignment and sizes.
     * SELM data must be 32-byte aligned inside the VM structure and the
     * TSS 16-byte aligned; the instance data must fit in its padding.
     */
    AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
    AssertRelease(!(RT_OFFSETOF(VM, selm.s.Tss) & 15));
    AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));

    /*
     * Init the structure.
     * The hypervisor selectors occupy the last five GDT entries
     * (selector value = GDT index << 3).
     */
    pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
    pVM->selm.s.SelCS = (SELM_GDT_ELEMENTS - 0x1) << 3;
    pVM->selm.s.SelDS = (SELM_GDT_ELEMENTS - 0x2) << 3;
    pVM->selm.s.SelCS64 = (SELM_GDT_ELEMENTS - 0x3) << 3;
    pVM->selm.s.SelTSS = (SELM_GDT_ELEMENTS - 0x4) << 3;
    pVM->selm.s.SelTSSTrap08 = (SELM_GDT_ELEMENTS - 0x5) << 3;

    /*
     * Allocate GDT table (page aligned, never relocated).
     */
    int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtHC[0]) * SELM_GDT_ELEMENTS,
                                     PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtHC);
    AssertRCReturn(rc, rc);

    /*
     * Allocate LDT area (64K limit plus one guard/overflow page).
     */
    rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.HCPtrLdt);
    AssertRCReturn(rc, rc);

    /*
     * Init Guest's and Shadow GDT, LDT, TSS changes control variables.
     * ~0 (and 0 for paGdtGC) mark the addresses as "not set yet"; the
     * monitoring code tests for these sentinels before deregistering.
     */
    pVM->selm.s.cbEffGuestGdtLimit = 0;
    pVM->selm.s.GuestGdtr.pGdt = ~0;
    pVM->selm.s.GCPtrGuestLdt = ~0;
    pVM->selm.s.GCPtrGuestTss = ~0;

    pVM->selm.s.paGdtGC = 0;
    pVM->selm.s.GCPtrLdt = ~0;
    pVM->selm.s.GCPtrTss = ~0;
    pVM->selm.s.GCSelTss = ~0;

    pVM->selm.s.fDisableMonitoring = false;

    /*
     * Register the saved state data unit (instance 1, current version).
     */
    rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
                               NULL, selmR3Save, NULL,
                               NULL, selmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
    STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");

    /*
     * Default action when entering raw mode for the first time:
     * force a full sync of everything SELM shadows.
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);

    /*
     * Register info handlers.
     */
    DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
    DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
    DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
    DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
    //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
    //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);

    return rc;
}
182
183
/**
 * Finalizes HMA page attributes.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 */
SELMR3DECL(int) SELMR3InitFinalize(PVM pVM)
{
    /*
     * Make Double Fault work with WP enabled?
     *
     * The double fault is a task switch and thus requires write access to the GDT of the TSS
     * (to set it busy), to the old TSS (to store state), and to the Trap 8 TSS for the back link.
     *
     * Since we in enabling write access to these pages make ourself vulnerable to attacks,
     * it is not possible to do this by default.
     */
    bool f;
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
#if !defined(DEBUG_bird)
    /* Only honour the config option in normal builds; in DEBUG_bird builds
       the #if removes the condition so the block below always runs. */
    if (VBOX_SUCCESS(rc) && f)
#endif
    {
        /* Mark the GDT entries of both TSSes, and the TSS pages themselves,
           writable + present + accessed + dirty in the hypervisor mapping. */
        PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.SelTSSTrap08 >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.SelTSS >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss), sizeof(pVM->selm.s.Tss),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08), sizeof(pVM->selm.s.TssTrap08),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
    }
    /* Failures above are only asserted, not propagated. */
    return VINF_SUCCESS;
}
223
224
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * Rebuilds the hypervisor GDT entries (flat CS/DS, 64-bit CS, the two TSS
 * descriptors), refreshes both TSS images, and re-registers the shadow
 * GDT/LDT/TSS write access handlers at the new GC addresses.
 *
 * @param pVM The VM.
 */
SELMR3DECL(void) SELMR3Relocate(PVM pVM)
{
    LogFlow(("SELMR3Relocate\n"));
    PVBOXDESC paGdt = pVM->selm.s.paGdtHC;

    /*
     * Update GDTR and selector.
     */
    CPUMSetHyperGDTR(pVM, MMHyperHC2GC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);

    /** @todo selector relocations should be a separate operation? */
    CPUMSetHyperCS(pVM, pVM->selm.s.SelCS);
    CPUMSetHyperDS(pVM, pVM->selm.s.SelDS);
    CPUMSetHyperES(pVM, pVM->selm.s.SelDS);
    CPUMSetHyperSS(pVM, pVM->selm.s.SelDS);
    CPUMSetHyperTR(pVM, pVM->selm.s.SelTSS);

    /*
     * Set up global code and data descriptors for use in the guest context.
     * Both are wide open (base 0, limit 4GB)
     */
    PVBOXDESC pDesc = &paGdt[pVM->selm.s.SelCS >> 3];
    pDesc->Gen.u16LimitLow = 0xffff;
    pDesc->Gen.u4LimitHigh = 0xf;
    pDesc->Gen.u16BaseLow = 0;
    pDesc->Gen.u8BaseHigh1 = 0;
    pDesc->Gen.u8BaseHigh2 = 0;
    pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
    pDesc->Gen.u1DescType = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl = 0; /* supervisor */
    pDesc->Gen.u1Present = 1;
    pDesc->Gen.u1Available = 0;
    pDesc->Gen.u1Reserved = 0;
    pDesc->Gen.u1DefBig = 1; /* def 32 bit */
    pDesc->Gen.u1Granularity = 1; /* 4KB limit */

    /* data */
    pDesc = &paGdt[pVM->selm.s.SelDS >> 3];
    pDesc->Gen.u16LimitLow = 0xffff;
    pDesc->Gen.u4LimitHigh = 0xf;
    pDesc->Gen.u16BaseLow = 0;
    pDesc->Gen.u8BaseHigh1 = 0;
    pDesc->Gen.u8BaseHigh2 = 0;
    pDesc->Gen.u4Type = X86_SELTYPE_MEM_READWRITE_ACC;
    pDesc->Gen.u1DescType = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl = 0; /* supervisor */
    pDesc->Gen.u1Present = 1;
    pDesc->Gen.u1Available = 0;
    pDesc->Gen.u1Reserved = 0;
    pDesc->Gen.u1DefBig = 1; /* big */
    pDesc->Gen.u1Granularity = 1; /* 4KB limit */

    /* 64-bit mode code (& data?) */
    pDesc = &paGdt[pVM->selm.s.SelCS64 >> 3];
    pDesc->Gen.u16LimitLow = 0xffff;
    pDesc->Gen.u4LimitHigh = 0xf;
    pDesc->Gen.u16BaseLow = 0;
    pDesc->Gen.u8BaseHigh1 = 0;
    pDesc->Gen.u8BaseHigh2 = 0;
    pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
    pDesc->Gen.u1DescType = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl = 0; /* supervisor */
    pDesc->Gen.u1Present = 1;
    pDesc->Gen.u1Available = 0;
    pDesc->Gen.u1Reserved = 1; /* The Long (L) attribute bit. */
    pDesc->Gen.u1DefBig = 0; /* With L=1 this must be 0. */
    pDesc->Gen.u1Granularity = 1; /* 4KB limit */

    /*
     * TSS descriptor (byte granular, limit = sizeof(VBOXTSS)-1,
     * base = guest-context address of the shadow TSS).
     */
    pDesc = &paGdt[pVM->selm.s.SelTSS >> 3];
    RTGCPTR pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
    pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
    pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
    pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
    pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
    pDesc->Gen.u4LimitHigh = 0;
    pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
    pDesc->Gen.u1DescType = 0; /* system */
    pDesc->Gen.u2Dpl = 0; /* supervisor */
    pDesc->Gen.u1Present = 1;
    pDesc->Gen.u1Available = 0;
    pDesc->Gen.u1Reserved = 0;
    pDesc->Gen.u1DefBig = 0;
    pDesc->Gen.u1Granularity = 0; /* byte limit */

    /*
     * TSS descriptor for trap 08 (double fault task gate target).
     */
    pDesc = &paGdt[pVM->selm.s.SelTSSTrap08 >> 3];
    pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
    pDesc->Gen.u4LimitHigh = 0;
    pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08);
    pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
    pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
    pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
    pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
    pDesc->Gen.u1DescType = 0; /* system */
    pDesc->Gen.u2Dpl = 0; /* supervisor */
    pDesc->Gen.u1Present = 1;
    pDesc->Gen.u1Available = 0;
    pDesc->Gen.u1Reserved = 0;
    pDesc->Gen.u1DefBig = 0;
    pDesc->Gen.u1Granularity = 0; /* byte limit */

/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
/** @todo PGM knows the proper CR3 values these days, not CPUM. */
    /*
     * Update the TSSes.
     */
    /* Current TSS */
    pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
    pVM->selm.s.Tss.ss0 = pVM->selm.s.SelDS;
    pVM->selm.s.Tss.esp0 = VMMGetStackGC(pVM);
    pVM->selm.s.Tss.cs = pVM->selm.s.SelCS;
    pVM->selm.s.Tss.ds = pVM->selm.s.SelDS;
    pVM->selm.s.Tss.es = pVM->selm.s.SelDS;
    pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS); /* bitmap offset past the end => no I/O bitmap */

    /* trap 08 */
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM); /* this should give use better survival chances. */
    pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.SelDS;
    pVM->selm.s.TssTrap08.ss = pVM->selm.s.SelDS;
    pVM->selm.s.TssTrap08.esp0 = VMMGetStackGC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
    pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.cs = pVM->selm.s.SelCS;
    pVM->selm.s.TssTrap08.ds = pVM->selm.s.SelDS;
    pVM->selm.s.TssTrap08.es = pVM->selm.s.SelDS;
    pVM->selm.s.TssTrap08.fs = 0;
    pVM->selm.s.TssTrap08.gs = 0;
    pVM->selm.s.TssTrap08.selLdt = 0;
    pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
    pVM->selm.s.TssTrap08.ecx = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
    pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.edx = VM_GUEST_ADDR(pVM, pVM); /* setup edx VM address. */
    /* NOTE(review): the next line overwrites the edi = ecx assignment a few
       lines up, making that store dead — was esi intended here? Confirm
       against the trap-08 handler before changing. */
    pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
    /* TRPM will be updating the eip */

    if (!pVM->selm.s.fDisableMonitoring)
    {
        /*
         * Update shadow GDT/LDT/TSS write access handlers.
         * Each handler is deregistered at its old GC address (if any)
         * before being re-registered at the relocated address.
         */
        int rc;
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
        if (pVM->selm.s.paGdtGC != 0)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
            AssertRC(rc);
        }
        pVM->selm.s.paGdtGC = MMHyperHC2GC(pVM, paGdt);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtGC,
                                         pVM->selm.s.paGdtGC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
                                         0, 0, "selmgcShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
        AssertRC(rc);
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
        if (pVM->selm.s.GCPtrTss != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
            AssertRC(rc);
        }
        pVM->selm.s.GCPtrTss = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrTss,
                                         pVM->selm.s.GCPtrTss + sizeof(pVM->selm.s.Tss) - 1,
                                         0, 0, "selmgcShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
        AssertRC(rc);
#endif

        /*
         * Update the GC LDT region handler and address.
         */
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        if (pVM->selm.s.GCPtrLdt != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
            AssertRC(rc);
        }
#endif
        pVM->selm.s.GCPtrLdt = MMHyperHC2GC(pVM, pVM->selm.s.HCPtrLdt);
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrLdt,
                                         pVM->selm.s.GCPtrLdt + _64K + PAGE_SIZE - 1,
                                         0, 0, "selmgcShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
        AssertRC(rc);
#endif
    }
}
425
426
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Refreshes the CR3 fields of both shadow TSS images so a task switch into
 * them uses the current hypervisor / intermediate page tables.
 *
 * @param pVM The VM handle
 */
SELMR3DECL(void) SELMR3PagingModeChanged(PVM pVM)
{
    pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM);
}
439
440
441/**
442 * Terminates the SELM.
443 *
444 * Termination means cleaning up and freeing all resources,
445 * the VM it self is at this point powered off or suspended.
446 *
447 * @returns VBox status code.
448 * @param pVM The VM to operate on.
449 */
450SELMR3DECL(int) SELMR3Term(PVM pVM)
451{
452 return 0;
453}
454
455
/**
 * The VM is being reset.
 *
 * For the SELM component this means that any GDT/LDT/TSS monitors
 * needs to be removed.
 *
 * @param pVM VM handle.
 */
SELMR3DECL(void) SELMR3Reset(PVM pVM)
{
    LogFlow(("SELMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     * The ~0 sentinels mean "not registered"; each address is reset to the
     * sentinel after deregistration so a second reset is a no-op.
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Re-initialize other members.
     */
    pVM->selm.s.cbLdtLimit = 0;
    pVM->selm.s.offLdtHyper = 0;
    pVM->selm.s.cbMonitoredGuestTss = 0;

    /*
     * Default action when entering raw mode for the first time:
     * force a full resync of everything SELM shadows.
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
}
515
/**
 * Disable GDT/LDT/TSS monitoring and syncing
 *
 * Deregisters both the guest-side and the shadow-side write handlers,
 * clears the pending sync force-flags and latches fDisableMonitoring so
 * SELMR3UpdateFromCPUM becomes a no-op afterwards.
 *
 * @param pVM The VM to operate on.
 */
SELMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
{
    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     * (Same sequence as SELMR3Reset.)
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Unregister shadow GDT/LDT/TSS write access handlers.
     */
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
    if (pVM->selm.s.paGdtGC != 0)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
        AssertRC(rc);
        pVM->selm.s.paGdtGC = 0;
    }
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
    if (pVM->selm.s.GCPtrTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrTss = ~0U;
    }
#endif
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
    if (pVM->selm.s.GCPtrLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrLdt = ~0U;
    }
#endif

    /* No monitoring => nothing left to sync. */
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);

    pVM->selm.s.fDisableMonitoring = true;
}
589
/**
 * Execute state save operation.
 *
 * Saves the monitoring flag and the five hypervisor selector values; all
 * other SELM state is resynced on load.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 */
static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("selmR3Save:\n"));

    /*
     * Save the basic bits - fortunately all the other things can be resynced on load.
     */
    PSELM pSelm = &pVM->selm.s;

    SSMR3PutUInt(pSSM, pSelm->fDisableMonitoring); /* bool stored as UInt; load reads it back the same way. */
    SSMR3PutSel(pSSM, pSelm->SelCS);
    SSMR3PutSel(pSSM, pSelm->SelDS);
    SSMR3PutSel(pSSM, pSelm->SelCS64);
    SSMR3PutSel(pSSM, pSelm->SelCS64); //reserved for DS64 (saved twice on purpose to keep the slot).
    SSMR3PutSel(pSSM, pSelm->SelTSS);
    /* Only the final put's status is returned; earlier failures are latched
       by SSM and surface here. */
    return SSMR3PutSel(pSSM, pSelm->SelTSSTrap08);
}
614
615
/**
 * Execute state load operation.
 *
 * Validates the unit version, resets SELM, reads back the monitoring flag
 * and selector values, and verifies the selectors match the current layout
 * before flagging a full resync.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 * @param u32Version Data layout version.
 */
static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("selmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != SELM_SAVED_STATE_VERSION)
    {
        Log(("selmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset.
     */
    SELMR3Reset(pVM);

    /* Get the monitoring flag. */
    SSMR3GetUInt(pSSM, &pVM->selm.s.fDisableMonitoring);

    /*
     * Get the selectors.
     * (Individual SSMR3GetSel statuses are not checked here; a stream error
     * is latched by SSM and the relocation check below will then reject.)
     */
    RTSEL SelCS;
    SSMR3GetSel(pSSM, &SelCS);
    RTSEL SelDS;
    SSMR3GetSel(pSSM, &SelDS);
    RTSEL SelCS64;
    SSMR3GetSel(pSSM, &SelCS64);
    RTSEL SelDS64;
    SSMR3GetSel(pSSM, &SelDS64);
    RTSEL SelTSS;
    SSMR3GetSel(pSSM, &SelTSS);
    RTSEL SelTSSTrap08;
    SSMR3GetSel(pSSM, &SelTSSTrap08);
    /* NOTE(review): the version gate above only admits SELM_SAVED_STATE_VERSION
       (4), so this version-1 branch is unreachable dead code kept from an
       older format — confirm before removing. */
    if (u32Version == 1)
    {
        RTSEL SelTSSTrap0a;
        int rc = SSMR3GetSel(pSSM, &SelTSSTrap0a);
        if (VBOX_FAILURE(rc))
            return rc;
    }

    /* Check that no selectors have be relocated. */
    PSELM pSelm = &pVM->selm.s;
    if ( SelCS != pSelm->SelCS
        || SelDS != pSelm->SelDS
        || SelCS64 != pSelm->SelCS64
        || SelDS64 != pSelm->SelCS64 /* the DS64 slot is saved as SelCS64, see selmR3Save. */
        || SelTSS != pSelm->SelTSS
        || SelTSSTrap08 != pSelm->SelTSSTrap08)
    {
        AssertMsgFailed(("Some selector have been relocated - this cannot happen!\n"));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Flag everything for resync.
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);

    return VINF_SUCCESS;
}
690
691
692
693
694/**
695 * Sets up the virtualization of a guest GDT.
696 *
697 * @returns VBox status code.
698 * @param pVM The VM to operate on.
699 * @param paGDTEs Pointer to GDT array.
700 * @param cGDTEs Number of entries in the GDT array.
701 */
702SELMR3DECL(int) SELMR3GdtSetup(PVM pVM, PCVBOXDESC paGDTEs, unsigned cGDTEs)
703{
704 AssertMsg(cGDTEs <= (unsigned)(pVM->selm.s.SelTSSTrap08 >> 3), ("Oops! the loaded GDT is as large as our.. we assume no clashes!!!\n"));
705
706 /*
707 * Enumerate the array.
708 */
709 PCVBOXDESC pGDTESrc = paGDTEs;
710 PVBOXDESC pGDTEDst = pVM->selm.s.paGdtHC;
711 for (unsigned iGDT = 0; iGDT < cGDTEs; iGDT++, pGDTEDst++, pGDTESrc++)
712 {
713 /* ASSUME no clashes for now - lazy bird!!! */
714 if (pGDTESrc->Gen.u1Present)
715 {
716 pGDTEDst->Gen = pGDTESrc->Gen;
717 /* mark non ring-3 selectors as not present. */
718 if (pGDTEDst->Gen.u2Dpl != 3)
719 pGDTEDst->Gen.u1Present = 0;
720 }
721 else
722 {
723 /* zero it. */
724 pGDTEDst->au32[0] = 0;
725 pGDTEDst->au32[1] = 0;
726 }
727 }
728
729 return VINF_SUCCESS;
730}
731
732
733/**
734 * Updates the Guest GDT & LDT virtualization based on current CPU state.
735 *
736 * @returns VBox status code.
737 * @param pVM The VM to operate on.
738 */
739SELMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM)
740{
741 int rc = VINF_SUCCESS;
742
743 if (pVM->selm.s.fDisableMonitoring)
744 {
745 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
746 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
747 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
748
749 return VINF_SUCCESS;
750 }
751
752 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
753
754 /*
755 * GDT sync
756 */
757 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT))
758 {
759 /*
760 * Always assume the best
761 */
762 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
763
764 /* If the GDT was changed, then make sure the LDT is checked too */
765 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
766 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
767 /* Same goes for the TSS selector */
768 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
769
770 /*
771 * Get the GDTR and check if there is anything to do (there usually is).
772 */
773 VBOXGDTR GDTR;
774 CPUMGetGuestGDTR(pVM, &GDTR);
775 if (GDTR.cbGdt < sizeof(VBOXDESC))
776 {
777 Log(("No GDT entries...\n"));
778 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
779 return VINF_SUCCESS;
780 }
781
782 /*
783 * Read the Guest GDT.
784 * ASSUMES that the entire GDT is in memory.
785 */
786 RTUINT cbEffLimit = GDTR.cbGdt;
787 PVBOXDESC pGDTE = &pVM->selm.s.paGdtHC[1];
788 rc = PGMPhysReadGCPtr(pVM, pGDTE, GDTR.pGdt + sizeof(VBOXDESC), cbEffLimit + 1 - sizeof(VBOXDESC));
789 if (VBOX_FAILURE(rc))
790 {
791 /*
792 * Read it page by page.
793 *
794 * Keep track of the last valid page and delay memsets and
795 * adjust cbEffLimit to reflect the effective size. The latter
796 * is something we do in the belief that the guest will probably
797 * never actually commit the last page, thus allowing us to keep
798 * our selectors in the high end of the GDT.
799 */
800 RTUINT cbLeft = cbEffLimit + 1 - sizeof(VBOXDESC);
801 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(VBOXDESC);
802 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtHC[1];
803 uint8_t *pu8DstInvalid = pu8Dst;
804
805 while (cbLeft)
806 {
807 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
808 cb = RT_MIN(cb, cbLeft);
809 rc = PGMPhysReadGCPtr(pVM, pu8Dst, GCPtrSrc, cb);
810 if (VBOX_SUCCESS(rc))
811 {
812 if (pu8DstInvalid != pu8Dst)
813 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
814 GCPtrSrc += cb;
815 pu8Dst += cb;
816 pu8DstInvalid = pu8Dst;
817 }
818 else if ( rc == VERR_PAGE_NOT_PRESENT
819 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
820 {
821 GCPtrSrc += cb;
822 pu8Dst += cb;
823 }
824 else
825 {
826 AssertReleaseMsgFailed(("Couldn't read GDT at %RX32, rc=%Vrc!\n", GDTR.pGdt, rc));
827 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
828 return VERR_NOT_IMPLEMENTED;
829 }
830 cbLeft -= cb;
831 }
832
833 /* any invalid pages at the end? */
834 if (pu8DstInvalid != pu8Dst)
835 {
836 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtHC - 1;
837 /* If any GDTEs was invalidated, zero them. */
838 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
839 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
840 }
841
842 /* keep track of the effective limit. */
843 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
844 {
845 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
846 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
847 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
848 }
849 }
850
851 /*
852 * Check if the Guest GDT intrudes on our GDT entries.
853 */
854 // RTSEL aHyperGDT[MAX_NEEDED_HYPERVISOR_GDTS];
855 if (cbEffLimit >= pVM->selm.s.SelTSSTrap08)
856 {
857#if 0
858 PVBOXDESC pGDTEStart = pVM->selm.s.paGdtHC;
859 PVBOXDESC pGDTE = (PVBOXDESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(VBOXDESC));
860 int iGDT = 0;
861
862 /* Disabling this for now; previously saw triple faults with OS/2, before fixing the above if statement */
863 Log(("Internal SELM GDT conflict: use non-present entries\n"));
864 while (pGDTE > pGDTEStart && iGDT < MAX_NEEDED_HYPERVISOR_GDTS)
865 {
866 /* We can reuse non-present entries */
867 if (!pGDTE->Gen.u1Present)
868 {
869 aHyperGDT[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtHC) / sizeof(VBOXDESC);
870 aHyperGDT[iGDT] = aHyperGDT[iGDT] << X86_SEL_SHIFT;
871 Log(("SELM: Found unused GDT %04X\n", aHyperGDT[iGDT]));
872 iGDT++;
873 }
874
875 pGDTE--;
876 }
877 if (iGDT != MAX_NEEDED_HYPERVISOR_GDTS)
878#endif
879 {
880 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
881 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
882 return VERR_NOT_IMPLEMENTED;
883 }
884 }
885
886 /*
887 * Work thru the copied GDT entries adjusting them for correct virtualization.
888 */
889 PVBOXDESC pGDTEEnd = (PVBOXDESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(VBOXDESC));
890 while (pGDTE < pGDTEEnd)
891 {
892 if (pGDTE->Gen.u1Present)
893 {
894 /*
895 * Code and data selectors are generally 1:1, with the
896 * 'little' adjustment we do for DPL 0 selectors.
897 */
898 if (pGDTE->Gen.u1DescType)
899 {
900 /*
901 * Hack for A-bit against Trap E on read-only GDT.
902 */
903 /** @todo Fix this by loading ds and cs before turning off WP. */
904 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
905
906 /*
907 * All DPL 0 code and data segments are squeezed into DPL 1.
908 *
909 * We're skipping conforming segments here because those
910 * cannot give us any trouble.
911 */
912 if ( pGDTE->Gen.u2Dpl == 0
913 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
914 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
915 pGDTE->Gen.u2Dpl = 1;
916 }
917 else
918 {
919 /*
920 * System type selectors are marked not present.
921 * Recompiler or special handling is required for these.
922 */
923 /** @todo what about interrupt gates and rawr0? */
924 pGDTE->Gen.u1Present = 0;
925 }
926 }
927
928 /* Next GDT entry. */
929 pGDTE++;
930 }
931
932#if 0 /** @todo r=bird: The relocation code won't be working right. Start with the IF below. */
933 /*
934 * Check if the Guest GDT intrudes on our GDT entries.
935 */
936 if (cbEffLimit >= pVM->selm.s.SelTSSTrap08)
937 {
938 /* Reinitialize our hypervisor GDTs */
939 pVM->selm.s.SelCS = aHyperGDT[0];
940 pVM->selm.s.SelDS = aHyperGDT[1];
941 pVM->selm.s.SelCS64 = aHyperGDT[2];
942 pVM->selm.s.SelTSS = aHyperGDT[3];
943 pVM->selm.s.SelTSSTrap08 = aHyperGDT[4];
944 SELMR3Relocate(pVM); /** @todo r=bird: Must call VMR3Relocate! */
945 }
946#endif
947
948 /*
949 * Adjust the cached GDT limit.
950 * Any GDT entries which have been removed must be cleared.
951 */
952 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
953 {
954 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
955 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
956#ifndef SELM_TRACK_GUEST_GDT_CHANGES
957 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
958#endif
959 }
960
961#ifdef SELM_TRACK_GUEST_GDT_CHANGES
962 /*
963 * Check if Guest's GDTR is changed.
964 */
965 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
966 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
967 {
968 Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%08X cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
969
970 /*
971 * [Re]Register write virtual handler for guest's GDT.
972 */
973 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
974 {
975 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
976 AssertRC(rc);
977 }
978
979 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
980 0, selmGuestGDTWriteHandler, "selmgcGuestGDTWriteHandler", 0, "Guest GDT write access handler");
981 if (VBOX_FAILURE(rc))
982 return rc;
983
984 /* Update saved Guest GDTR. */
985 pVM->selm.s.GuestGdtr = GDTR;
986 pVM->selm.s.fGDTRangeRegistered = true;
987 }
988#endif
989 }
990
991 /*
992 * TSS sync
993 */
994 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
995 {
996 SELMR3SyncTSS(pVM);
997 }
998
999 /*
1000 * LDT sync
1001 */
1002 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_LDT))
1003 {
1004 /*
1005 * Always assume the best
1006 */
1007 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
1008
1009 /*
1010 * LDT handling is done similarly to the GDT handling with a shadow
1011 * array. However, since the LDT is expected to be swappable (at least
1012 * some ancient OSes makes it swappable) it must be floating and
1013 * synced on a per-page basis.
1014 *
1015 * Eventually we will change this to be fully on demand. Meaning that
1016 * we will only sync pages containing LDT selectors actually used and
1017 * let the #PF handler lazily sync pages as they are used.
1018 * (This applies to GDT too, when we start making OS/2 fast.)
1019 */
1020
1021 /*
1022 * First, determin the current LDT selector.
1023 */
1024 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1025 if ((SelLdt & X86_SEL_MASK) == 0)
1026 {
1027 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1028 CPUMSetHyperLDTR(pVM, 0);
1029#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1030 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1031 {
1032 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1033 AssertRC(rc);
1034 pVM->selm.s.GCPtrGuestLdt = ~0U;
1035 }
1036#endif
1037 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1038 return VINF_SUCCESS;
1039 }
1040
1041 /*
1042 * Get the LDT selector.
1043 */
1044 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelLdt >> X86_SEL_SHIFT];
1045 RTGCPTR GCPtrLdt = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1046 unsigned cbLdt = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1047 if (pDesc->Gen.u1Granularity)
1048 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1049
1050 /*
1051 * Validate it.
1052 */
1053 if ( !cbLdt
1054 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1055 || pDesc->Gen.u1DescType
1056 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1057 {
1058 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1059
1060 /* cbLdt > 0:
1061 * This is quite impossible, so we do as most people do when faced with
1062 * the impossible, we simply ignore it.
1063 */
1064 CPUMSetHyperLDTR(pVM, 0);
1065#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1066 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1067 {
1068 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1069 AssertRC(rc);
1070 pVM->selm.s.GCPtrGuestLdt = ~0U;
1071 }
1072#endif
1073 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1074 return VINF_SUCCESS;
1075 }
1076 /** @todo check what intel does about odd limits. */
1077 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1078
1079 /*
1080 * Use the cached guest ldt address if the descriptor has already been modified (see below)
1081 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1082 */
1083 if (MMHyperIsInsideArea(pVM, GCPtrLdt) == true)
1084 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1085
1086
1087#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1088 /** @todo Handle only present LDT segments. */
1089 // if (pDesc->Gen.u1Present)
1090 {
1091 /*
1092 * Check if Guest's LDT address/limit is changed.
1093 */
1094 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1095 || cbLdt != pVM->selm.s.cbLdtLimit)
1096 {
1097 Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%VGv:%04x)\n",
1098 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1099
1100 /*
1101 * [Re]Register write virtual handler for guest's GDT.
1102 * In the event of LDT overlapping something, don't install it just assume it's being updated.
1103 */
1104 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1105 {
1106 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1107 AssertRC(rc);
1108 }
1109#ifdef DEBUG
1110 if (pDesc->Gen.u1Present)
1111 Log(("LDT selector marked not present!!\n"));
1112#endif
1113 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1114 0, selmGuestLDTWriteHandler, "selmgcGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1115 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1116 {
1117 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1118 pVM->selm.s.GCPtrGuestLdt = ~0;
1119 Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%VGv:%04x)\n",
1120 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1121 }
1122 else if (VBOX_SUCCESS(rc))
1123 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1124 else
1125 {
1126 CPUMSetHyperLDTR(pVM, 0);
1127 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1128 return rc;
1129 }
1130
1131 pVM->selm.s.cbLdtLimit = cbLdt;
1132 }
1133 }
1134#else
1135 pVM->selm.s.cbLdtLimit = cbLdt;
1136#endif
1137
1138 /*
1139 * Calc Shadow LDT base.
1140 */
1141 unsigned off;
1142 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1143 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.GCPtrLdt + off);
1144 PVBOXDESC pShadowLDT = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1145
1146 /*
1147 * Enable the LDT selector in the shadow GDT.
1148 */
1149 pDesc->Gen.u1Present = 1;
1150 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1151 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1152 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1153 pDesc->Gen.u1Available = 0;
1154 pDesc->Gen.u1Reserved = 0;
1155 if (cbLdt > 0xffff)
1156 {
1157 cbLdt = 0xffff;
1158 pDesc->Gen.u4LimitHigh = 0;
1159 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1160 }
1161
1162 /*
1163 * Set Hyper LDTR and notify TRPM.
1164 */
1165 CPUMSetHyperLDTR(pVM, SelLdt);
1166
1167 /*
1168 * Loop synchronising the LDT page by page.
1169 */
1170 /** @todo investigate how intel handle various operations on half present cross page entries. */
1171 off = GCPtrLdt & (sizeof(VBOXDESC) - 1);
1172 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1173 GCPtrLdt += sizeof(VBOXDESC);
1174 pShadowLDT++;
1175 unsigned cbLeft = cbLdt + 1 - sizeof(VBOXDESC);
1176 PVBOXDESC pLDTE = pShadowLDT;
1177 while (cbLeft)
1178 {
1179 /*
1180 * Read a chunk.
1181 */
1182 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1183 if (cbChunk > cbLeft)
1184 cbChunk = cbLeft;
1185 rc = PGMPhysReadGCPtr(pVM, pShadowLDT, GCPtrLdt, cbChunk);
1186 if (VBOX_SUCCESS(rc))
1187 {
1188 /*
1189 * Mark page
1190 */
1191 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1192 AssertRC(rc);
1193
1194 /*
1195 * Loop thru the available LDT entries.
1196 * Figure out where to start and end and the potential cross pageness of
1197 * things adds a little complexity. pLDTE is updated there and not in the
1198 * 'next' part of the loop. The pLDTEEnd is inclusive.
1199 */
1200 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1201 if (pLDTE + 1 < pShadowLDT)
1202 pLDTE = (PVBOXDESC)((uintptr_t)pShadowLDT + off);
1203 while (pLDTE <= pLDTEEnd)
1204 {
1205 if (pLDTE->Gen.u1Present)
1206 {
1207 /*
1208 * Code and data selectors are generally 1:1, with the
1209 * 'little' adjustment we do for DPL 0 selectors.
1210 */
1211 if (pLDTE->Gen.u1DescType)
1212 {
1213 /*
1214 * Hack for A-bit against Trap E on read-only GDT.
1215 */
1216 /** @todo Fix this by loading ds and cs before turning off WP. */
1217 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1218 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1219
1220 /*
1221 * All DPL 0 code and data segments are squeezed into DPL 1.
1222 *
1223 * We're skipping conforming segments here because those
1224 * cannot give us any trouble.
1225 */
1226 if ( pLDTE->Gen.u2Dpl == 0
1227 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1228 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1229 pLDTE->Gen.u2Dpl = 1;
1230 }
1231 else
1232 {
1233 /*
1234 * System type selectors are marked not present.
1235 * Recompiler or special handling is required for these.
1236 */
1237 /** @todo what about interrupt gates and rawr0? */
1238 pLDTE->Gen.u1Present = 0;
1239 }
1240 }
1241
1242 /* Next LDT entry. */
1243 pLDTE++;
1244 }
1245 }
1246 else
1247 {
1248 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%d\n", rc));
1249 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1250 AssertRC(rc);
1251 }
1252
1253 /*
1254 * Advance to the next page.
1255 */
1256 cbLeft -= cbChunk;
1257 GCPtrShadowLDT += cbChunk;
1258 pShadowLDT = (PVBOXDESC)((char *)pShadowLDT + cbChunk);
1259 GCPtrLdt += cbChunk;
1260 }
1261 }
1262
1263 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1264 return VINF_SUCCESS;
1265}
1266
1267
1268/**
1269 * \#PF Handler callback for virtual access handler ranges.
1270 *
1271 * Important to realize that a physical page in a range can have aliases, and
1272 * for ALL and WRITE handlers these will also trigger.
1273 *
1274 * @returns VINF_SUCCESS if the handler have carried out the operation.
1275 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1276 * @param pVM VM Handle.
1277 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1278 * @param pvPtr The HC mapping of that address.
1279 * @param pvBuf What the guest is reading/writing.
1280 * @param cbBuf How much it's reading/writing.
1281 * @param enmAccessType The access type.
1282 * @param pvUser User argument.
1283 */
1284static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1285{
1286 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1287 Log(("selmGuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1288 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
1289
1290 return VINF_PGM_HANDLER_DO_DEFAULT;
1291}
1292
1293/**
1294 * \#PF Handler callback for virtual access handler ranges.
1295 *
1296 * Important to realize that a physical page in a range can have aliases, and
1297 * for ALL and WRITE handlers these will also trigger.
1298 *
1299 * @returns VINF_SUCCESS if the handler have carried out the operation.
1300 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1301 * @param pVM VM Handle.
1302 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1303 * @param pvPtr The HC mapping of that address.
1304 * @param pvBuf What the guest is reading/writing.
1305 * @param cbBuf How much it's reading/writing.
1306 * @param enmAccessType The access type.
1307 * @param pvUser User argument.
1308 */
1309static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1310{
1311 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1312 Log(("selmGuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1313 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
1314 return VINF_PGM_HANDLER_DO_DEFAULT;
1315}
1316
1317/**
1318 * \#PF Handler callback for virtual access handler ranges.
1319 *
1320 * Important to realize that a physical page in a range can have aliases, and
1321 * for ALL and WRITE handlers these will also trigger.
1322 *
1323 * @returns VINF_SUCCESS if the handler have carried out the operation.
1324 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1325 * @param pVM VM Handle.
1326 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1327 * @param pvPtr The HC mapping of that address.
1328 * @param pvBuf What the guest is reading/writing.
1329 * @param cbBuf How much it's reading/writing.
1330 * @param enmAccessType The access type.
1331 * @param pvUser User argument.
1332 */
1333static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1334{
1335 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1336 Log(("selmGuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1337 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1338 return VINF_PGM_HANDLER_DO_DEFAULT;
1339}
1340
1341/**
1342 * Check if the TSS ring 0 stack selector and pointer were updated (for now)
1343 *
1344 * @returns VBox status code.
1345 * @param pVM The VM to operate on.
1346 */
1347SELMR3DECL(int) SELMR3SyncTSS(PVM pVM)
1348{
1349 int rc;
1350
1351 if (pVM->selm.s.fDisableMonitoring)
1352 {
1353 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1354 return VINF_SUCCESS;
1355 }
1356
1357/** @todo r=bird: SELMR3SyncTSS should be VMMAll code.
1358 * All the base, size, flags and stuff must be kept up to date in the CPUM tr register.
1359 */
1360 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1361
1362 Assert(!VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT));
1363 Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS));
1364
1365 /*
1366 * TSS sync
1367 */
1368 RTSEL SelTss = CPUMGetGuestTR(pVM);
1369 if (SelTss & X86_SEL_MASK)
1370 {
1371 /** @todo r=bird: strictly speaking, this is wrong as we shouldn't bother with changes to
1372 * the TSS selector once its loaded. There are a bunch of this kind of problems (see Sander's
1373 * comment in the unzip defect)
1374 * The first part here should only be done when we're loading TR. The latter part which is
1375 * updating of the ss0:esp0 pair can be done by the access handler now since we can trap all
1376 * accesses, also REM ones. */
1377
1378 /*
1379 * Guest TR is not NULL.
1380 */
1381 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1382 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1383 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1384 if (pDesc->Gen.u1Granularity)
1385 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1386 cbTss++;
1387 pVM->selm.s.cbGuestTss = cbTss;
1388 pVM->selm.s.fGuestTss32Bit = pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1389 || pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1390
1391 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1392 if (cbTss > sizeof(VBOXTSS))
1393 cbTss = sizeof(VBOXTSS);
1394 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1395 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1396
1397 // All system GDTs are marked not present above. That explains why this check fails.
1398 //if (pDesc->Gen.u1Present)
1399 /** @todo Handle only present TSS segments. */
1400 {
1401 /*
1402 * Check if Guest's TSS is changed.
1403 */
1404 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1405 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1406 {
1407 Log(("SELMR3UpdateFromCPUM: Guest's TSS is changed to pTss=%08X cbTss=%08X cbGuestTss\n", GCPtrTss, cbTss, pVM->selm.s.cbGuestTss));
1408
1409 /*
1410 * Validate it.
1411 */
1412 if ( SelTss & X86_SEL_LDT
1413 || !cbTss
1414 || SelTss >= pVM->selm.s.GuestGdtr.cbGdt
1415 || pDesc->Gen.u1DescType
1416 || ( pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1417 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
1418 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1419 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) )
1420 {
1421 AssertMsgFailed(("Invalid Guest TSS %04x!\n", SelTss));
1422 }
1423 else
1424 {
1425 /*
1426 * [Re]Register write virtual handler for guest's TSS.
1427 */
1428 if (pVM->selm.s.GCPtrGuestTss != ~0U)
1429 {
1430 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1431 AssertRC(rc);
1432 }
1433
1434 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1,
1435 0, selmGuestTSSWriteHandler, "selmgcGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1436 if (VBOX_FAILURE(rc))
1437 {
1438 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1439 return rc;
1440 }
1441
1442 /* Update saved Guest TSS info. */
1443 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1444 pVM->selm.s.cbMonitoredGuestTss = cbTss;
1445 pVM->selm.s.GCSelTss = SelTss;
1446 }
1447 }
1448
1449 /* Update the ring 0 stack selector and base address */
1450 /* feeling very lazy; reading too much */
1451 VBOXTSS tss;
1452 rc = PGMPhysReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));
1453 if (VBOX_FAILURE(rc))
1454 {
1455 /// @todo this might not be as fatal as it seems!
1456 AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1457 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1458 return VERR_NOT_IMPLEMENTED;
1459 }
1460#ifdef DEBUG
1461 uint32_t ssr0, espr0;
1462
1463 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1464 ssr0 &= ~1;
1465
1466 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1467 {
1468 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1469 }
1470Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1471#endif
1472 /* Update our TSS structure for the guest's ring 1 stack */
1473 SELMSetRing1Stack(pVM, tss.ss0 | 1, tss.esp0);
1474 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1475 }
1476 }
1477
1478 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Compares the Guest GDT and LDT with the shadow tables.
1485 * This is a VBOX_STRICT only function.
1486 *
1487 * @returns VBox status code.
1488 * @param pVM The VM Handle.
1489 */
1490SELMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1491{
1492#ifdef VBOX_STRICT
1493 /*
1494 * Get GDTR and check for conflict.
1495 */
1496 VBOXGDTR GDTR;
1497 CPUMGetGuestGDTR(pVM, &GDTR);
1498 if (GDTR.cbGdt == 0)
1499 return VINF_SUCCESS;
1500
1501#if 0
1502 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.SelTSSTrap08 >> X86_SEL_SHIFT))
1503 {
1504 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
1505 return VERR_NOT_IMPLEMENTED;
1506 }
1507#endif
1508
1509 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1510 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1511
1512 /*
1513 * Loop thru the GDT checking each entry.
1514 */
1515 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1516 PVBOXDESC pGDTE = pVM->selm.s.paGdtHC;
1517 PVBOXDESC pGDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1518 while (pGDTE < pGDTEEnd)
1519 {
1520 VBOXDESC GDTEGuest;
1521 int rc = PGMPhysReadGCPtr(pVM, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1522 if (VBOX_SUCCESS(rc))
1523 {
1524 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1525 {
1526 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1527 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1528 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1529 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1530 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1531 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1532 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1533 {
1534 unsigned iGDT = pGDTE - pVM->selm.s.paGdtHC;
1535 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1536 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1537 }
1538 }
1539 }
1540
1541 /* Advance to the next descriptor. */
1542 GCPtrGDTEGuest += sizeof(VBOXDESC);
1543 pGDTE++;
1544 }
1545
1546
1547 /*
1548 * LDT?
1549 */
1550 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1551 if ((SelLdt & X86_SEL_MASK) == 0)
1552 return VINF_SUCCESS;
1553 if (SelLdt > GDTR.cbGdt)
1554 {
1555 Log(("SELMR3DebugCheck: ldt is out of bound SelLdt=%#x\n", SelLdt));
1556 return VERR_INTERNAL_ERROR;
1557 }
1558 VBOXDESC LDTDesc;
1559 int rc = PGMPhysReadGCPtr(pVM, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1560 if (VBOX_FAILURE(rc))
1561 {
1562 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1563 return rc;
1564 }
1565 RTGCPTR GCPtrLDTEGuest = LDTDesc.Gen.u16BaseLow | (LDTDesc.Gen.u8BaseHigh1 << 16) | (LDTDesc.Gen.u8BaseHigh2 << 24);
1566 unsigned cbLdt = LDTDesc.Gen.u16LimitLow | (LDTDesc.Gen.u4LimitHigh << 16);
1567 if (LDTDesc.Gen.u1Granularity)
1568 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1569
1570 /*
1571 * Validate it.
1572 */
1573 if (!cbLdt)
1574 return VINF_SUCCESS;
1575 /** @todo check what intel does about odd limits. */
1576 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1577 if ( LDTDesc.Gen.u1DescType
1578 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1579 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1580 {
1581 Log(("SELmR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1582 return VERR_INTERNAL_ERROR;
1583 }
1584
1585 /*
1586 * Loop thru the LDT checking each entry.
1587 */
1588 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1589 PVBOXDESC pLDTE = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1590 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + cbLdt);
1591 while (pLDTE < pLDTEEnd)
1592 {
1593 VBOXDESC LDTEGuest;
1594 int rc = PGMPhysReadGCPtr(pVM, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1595 if (VBOX_SUCCESS(rc))
1596 {
1597 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1598 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1599 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1600 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1601 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1602 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1603 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1604 {
1605 unsigned iLDT = pLDTE - (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1606 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1607 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1608 }
1609 }
1610
1611 /* Advance to the next descriptor. */
1612 GCPtrLDTEGuest += sizeof(VBOXDESC);
1613 pLDTE++;
1614 }
1615
1616#else
1617 NOREF(pVM);
1618#endif
1619
1620 return VINF_SUCCESS;
1621}
1622
1623
1624/**
1625 * Validates the RawR0 TSS values against the one in the Guest TSS.
1626 *
1627 * @returns true if it matches.
1628 * @returns false and assertions on mismatch..
1629 * @param pVM VM Handle.
1630 */
1631SELMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
1632{
1633#ifdef VBOX_STRICT
1634
1635 RTSEL SelTss = CPUMGetGuestTR(pVM);
1636 if (SelTss & X86_SEL_MASK)
1637 {
1638 AssertMsg((SelTss & X86_SEL_MASK) == (pVM->selm.s.GCSelTss & X86_SEL_MASK), ("New TSS selector = %04X, old TSS selector = %04X\n", SelTss, pVM->selm.s.GCSelTss));
1639
1640 /*
1641 * Guest TR is not NULL.
1642 */
1643 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1644 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1645 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1646 if (pDesc->Gen.u1Granularity)
1647 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1648 cbTss++;
1649 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1650 if (cbTss > sizeof(VBOXTSS))
1651 cbTss = sizeof(VBOXTSS);
1652 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1653 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1654
1655 // All system GDTs are marked not present above. That explains why this check fails.
1656 //if (pDesc->Gen.u1Present)
1657 /** @todo Handle only present TSS segments. */
1658 {
1659 /*
1660 * Check if Guest's TSS was changed.
1661 */
1662 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1663 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1664 {
1665 AssertMsgFailed(("Guest's TSS is changed from %RGv:%04x to %RGv:%04x\n",
1666 pVM->selm.s.GCPtrGuestTss, pVM->selm.s.cbMonitoredGuestTss,
1667 GCPtrTss, cbTss));
1668 }
1669 }
1670 }
1671
1672 RTGCPTR pGuestTSS = pVM->selm.s.GCPtrGuestTss;
1673 uint32_t ESPR0;
1674 int rc = PGMPhysReadGCPtr(pVM, &ESPR0, pGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
1675 if (VBOX_SUCCESS(rc))
1676 {
1677 RTSEL SelSS0;
1678 rc = PGMPhysReadGCPtr(pVM, &SelSS0, pGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
1679 if (VBOX_SUCCESS(rc))
1680 {
1681 if ( ESPR0 == pVM->selm.s.Tss.esp1
1682 && SelSS0 == (pVM->selm.s.Tss.ss1 & ~1))
1683 return true;
1684
1685 RTGCPHYS GCPhys;
1686 uint64_t fFlags;
1687
1688 rc = PGMGstGetPage(pVM, pGuestTSS, &fFlags, &GCPhys);
1689 AssertRC(rc);
1690 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%VGv Phys=%VGp\n",
1691 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, pGuestTSS, GCPhys));
1692 }
1693 else
1694 AssertRC(rc);
1695 }
1696 else
1697 /* Happens during early Windows XP boot when it is switching page tables. */
1698 Assert(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF)));
1699 return false;
1700#else
1701 NOREF(pVM);
1702 return true;
1703#endif
1704}
1705
1706
1707/**
1708 * Returns flat address and limit of LDT by LDT selector from guest GDTR.
1709 *
1710 * Fully validate selector.
1711 *
1712 * @returns VBox status.
1713 * @param pVM VM Handle.
1714 * @param SelLdt LDT selector.
1715 * @param ppvLdt Where to store the flat address of LDT.
1716 * @param pcbLimit Where to store LDT limit.
1717 */
1718SELMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1719{
1720 /* Get guest GDTR. */
1721 VBOXGDTR GDTR;
1722 CPUMGetGuestGDTR(pVM, &GDTR);
1723
1724 /* Check selector TI and GDT limit. */
1725 if ( SelLdt & X86_SEL_LDT
1726 || (SelLdt > GDTR.cbGdt))
1727 return VERR_INVALID_SELECTOR;
1728
1729 /* Read descriptor from GC. */
1730 VBOXDESC Desc;
1731 int rc = PGMPhysReadGCPtr(pVM, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1732 if (VBOX_FAILURE(rc))
1733 {
1734 /* fatal */
1735 AssertMsgFailed(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1736 return VERR_SELECTOR_NOT_PRESENT;
1737 }
1738
1739 /* Check if LDT descriptor is not present. */
1740 if (Desc.Gen.u1Present == 0)
1741 return VERR_SELECTOR_NOT_PRESENT;
1742
1743 /* Check LDT descriptor type. */
1744 if ( Desc.Gen.u1DescType == 1
1745 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1746 return VERR_INVALID_SELECTOR;
1747
1748 /* LDT descriptor is ok. */
1749 if (ppvLdt)
1750 {
1751 *ppvLdt = (RTGCPTR)( (Desc.Gen.u8BaseHigh2 << 24)
1752 | (Desc.Gen.u8BaseHigh1 << 16)
1753 | Desc.Gen.u16BaseLow);
1754 *pcbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1755 }
1756 return VINF_SUCCESS;
1757}
1758
1759
1760/**
1761 * Gets information about a selector.
1762 * Intended for the debugger mostly and will prefer the guest
1763 * descriptor tables over the shadow ones.
1764 *
1765 * @returns VINF_SUCCESS on success.
1766 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1767 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1768 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1769 * backing the selector table wasn't present.
1770 * @returns Other VBox status code on other errors.
1771 *
1772 * @param pVM VM handle.
1773 * @param Sel The selector to get info about.
1774 * @param pSelInfo Where to store the information.
1775 */
1776SELMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1777{
1778 Assert(pSelInfo);
1779
1780 /*
1781 * Read the descriptor entry
1782 */
1783 VBOXDESC Desc;
1784 if ( !(Sel & X86_SEL_LDT)
1785 && ( pVM->selm.s.SelCS == (Sel & X86_SEL_MASK)
1786 || pVM->selm.s.SelDS == (Sel & X86_SEL_MASK)
1787 || pVM->selm.s.SelCS64 == (Sel & X86_SEL_MASK)
1788 || pVM->selm.s.SelTSS == (Sel & X86_SEL_MASK)
1789 || pVM->selm.s.SelTSSTrap08 == (Sel & X86_SEL_MASK))
1790 )
1791 {
1792 /*
1793 * Hypervisor descriptor.
1794 */
1795 pSelInfo->fHyper = true;
1796 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1797 }
1798 else if (CPUMIsGuestInProtectedMode(pVM))
1799 {
1800 /*
1801 * Read it from the guest descriptor table.
1802 */
1803 pSelInfo->fHyper = false;
1804
1805 VBOXGDTR Gdtr;
1806 RTGCPTR GCPtrDesc;
1807 CPUMGetGuestGDTR(pVM, &Gdtr);
1808 if (!(Sel & X86_SEL_LDT))
1809 {
1810 /* GDT */
1811 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
1812 return VERR_INVALID_SELECTOR;
1813 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
1814 }
1815 else
1816 {
1817 /*
1818 * LDT - must locate the LDT first...
1819 */
1820 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1821 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(VBOXDESC) /* the first selector is invalid, right? */
1822 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
1823 return VERR_INVALID_SELECTOR;
1824 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
1825 int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
1826 if (VBOX_FAILURE(rc))
1827 return rc;
1828
1829 /* validate the LDT descriptor. */
1830 if (Desc.Gen.u1Present == 0)
1831 return VERR_SELECTOR_NOT_PRESENT;
1832 if ( Desc.Gen.u1DescType == 1
1833 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1834 return VERR_INVALID_SELECTOR;
1835
1836 unsigned cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1837 if (Desc.Gen.u1Granularity)
1838 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1839 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > cbLimit)
1840 return VERR_INVALID_SELECTOR;
1841
1842 /* calc the descriptor location. */
1843 GCPtrDesc = (Desc.Gen.u8BaseHigh2 << 24)
1844 | (Desc.Gen.u8BaseHigh1 << 16)
1845 | Desc.Gen.u16BaseLow;
1846 GCPtrDesc += (Sel & X86_SEL_MASK);
1847 }
1848
1849 /* read the descriptor. */
1850 int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
1851 if (VBOX_FAILURE(rc))
1852 return rc;
1853 }
1854 else
1855 {
1856 /*
1857 * We're in real mode.
1858 */
1859 pSelInfo->Sel = Sel;
1860 pSelInfo->GCPtrBase = Sel << 4;
1861 pSelInfo->cbLimit = 0xffff;
1862 pSelInfo->fHyper = false;
1863 pSelInfo->fRealMode = true;
1864 memset(&pSelInfo->Raw, 0, sizeof(pSelInfo->Raw));
1865 return VINF_SUCCESS;
1866 }
1867
1868 /*
1869 * Extract the base and limit
1870 */
1871 pSelInfo->Sel = Sel;
1872 pSelInfo->Raw = Desc;
1873 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1874 if (Desc.Gen.u1Granularity)
1875 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1876 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1877 | (Desc.Gen.u8BaseHigh1 << 16)
1878 | Desc.Gen.u16BaseLow;
1879 pSelInfo->fRealMode = false;
1880
1881 return VINF_SUCCESS;
1882}
1883
1884
1885/**
1886 * Gets information about a selector from the shadow tables.
1887 *
1888 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires
1889 * that the caller ensures that the shadow tables are up to date.
1890 *
1891 * @returns VINF_SUCCESS on success.
1892 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1893 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1894 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1895 * backing the selector table wasn't present.
1896 * @returns Other VBox status code on other errors.
1897 *
1898 * @param pVM VM handle.
1899 * @param Sel The selector to get info about.
1900 * @param pSelInfo Where to store the information.
1901 */
1902SELMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1903{
1904 Assert(pSelInfo);
1905
1906 /*
1907 * Read the descriptor entry
1908 */
1909 VBOXDESC Desc;
1910 if (!(Sel & X86_SEL_LDT))
1911 {
1912 /*
1913 * Global descriptor.
1914 */
1915 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1916 pSelInfo->fHyper = pVM->selm.s.SelCS == (Sel & X86_SEL_MASK)
1917 || pVM->selm.s.SelDS == (Sel & X86_SEL_MASK)
1918 || pVM->selm.s.SelCS64 == (Sel & X86_SEL_MASK)
1919 || pVM->selm.s.SelTSS == (Sel & X86_SEL_MASK)
1920 || pVM->selm.s.SelTSSTrap08 == (Sel & X86_SEL_MASK);
1921 /** @todo check that the GDT offset is valid. */
1922 }
1923 else
1924 {
1925 /*
1926 * Local Descriptor.
1927 */
1928 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
1929 Desc = paLDT[Sel >> X86_SEL_SHIFT];
1930 /** @todo check if the LDT page is actually available. */
1931 /** @todo check that the LDT offset is valid. */
1932 pSelInfo->fHyper = false;
1933 }
1934
1935 /*
1936 * Extract the base and limit
1937 */
1938 pSelInfo->Sel = Sel;
1939 pSelInfo->Raw = Desc;
1940 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1941 if (Desc.Gen.u1Granularity)
1942 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1943 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1944 | (Desc.Gen.u8BaseHigh1 << 16)
1945 | Desc.Gen.u16BaseLow;
1946 pSelInfo->fRealMode = false;
1947
1948 return VINF_SUCCESS;
1949}
1950
1951
1952/**
1953 * Formats a descriptor.
1954 *
1955 * @param Desc Descriptor to format.
1956 * @param Sel Selector number.
1957 * @param pszOutput Output buffer.
1958 * @param cchOutput Size of output buffer.
1959 */
1960static void selmR3FormatDescriptor(VBOXDESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
1961{
1962 /*
1963 * Make variable description string.
1964 */
1965 static struct
1966 {
1967 unsigned cch;
1968 const char *psz;
1969 } const aTypes[32] =
1970 {
1971 #define STRENTRY(str) { sizeof(str) - 1, str }
1972 /* system */
1973 STRENTRY("Reserved0 "), /* 0x00 */
1974 STRENTRY("TSS16Avail "), /* 0x01 */
1975 STRENTRY("LDT "), /* 0x02 */
1976 STRENTRY("TSS16Busy "), /* 0x03 */
1977 STRENTRY("Call16 "), /* 0x04 */
1978 STRENTRY("Task "), /* 0x05 */
1979 STRENTRY("Int16 "), /* 0x06 */
1980 STRENTRY("Trap16 "), /* 0x07 */
1981 STRENTRY("Reserved8 "), /* 0x08 */
1982 STRENTRY("TSS32Avail "), /* 0x09 */
1983 STRENTRY("ReservedA "), /* 0x0a */
1984 STRENTRY("TSS32Busy "), /* 0x0b */
1985 STRENTRY("Call32 "), /* 0x0c */
1986 STRENTRY("ReservedD "), /* 0x0d */
1987 STRENTRY("Int32 "), /* 0x0e */
1988 STRENTRY("Trap32 "), /* 0x0f */
1989 /* non system */
1990 STRENTRY("DataRO "), /* 0x10 */
1991 STRENTRY("DataRO Accessed "), /* 0x11 */
1992 STRENTRY("DataRW "), /* 0x12 */
1993 STRENTRY("DataRW Accessed "), /* 0x13 */
1994 STRENTRY("DataDownRO "), /* 0x14 */
1995 STRENTRY("DataDownRO Accessed "), /* 0x15 */
1996 STRENTRY("DataDownRW "), /* 0x16 */
1997 STRENTRY("DataDownRW Accessed "), /* 0x17 */
1998 STRENTRY("CodeEO "), /* 0x18 */
1999 STRENTRY("CodeEO Accessed "), /* 0x19 */
2000 STRENTRY("CodeER "), /* 0x1a */
2001 STRENTRY("CodeER Accessed "), /* 0x1b */
2002 STRENTRY("CodeConfEO "), /* 0x1c */
2003 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2004 STRENTRY("CodeConfER "), /* 0x1e */
2005 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2006 #undef SYSENTRY
2007 };
2008 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2009 char szMsg[128];
2010 char *psz = &szMsg[0];
2011 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2012 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2013 psz += aTypes[i].cch;
2014
2015 if (Desc.Gen.u1Present)
2016 ADD_STR(psz, "Present ");
2017 else
2018 ADD_STR(psz, "Not-Present ");
2019 if (Desc.Gen.u1Granularity)
2020 ADD_STR(psz, "Page ");
2021 if (Desc.Gen.u1DefBig)
2022 ADD_STR(psz, "32-bit ");
2023 else
2024 ADD_STR(psz, "16-bit ");
2025 #undef ADD_STR
2026 *psz = '\0';
2027
2028 /*
2029 * Limit and Base and format the output.
2030 */
2031 uint32_t u32Limit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
2032 if (Desc.Gen.u1Granularity)
2033 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2034 uint32_t u32Base = Desc.Gen.u8BaseHigh2 << 24 | Desc.Gen.u8BaseHigh1 << 16 | Desc.Gen.u16BaseLow;
2035
2036 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2037 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2038}
2039
2040
2041/**
2042 * Dumps a descriptor.
2043 *
2044 * @param Desc Descriptor to dump.
2045 * @param Sel Selector number.
2046 * @param pszMsg Message to prepend the log entry with.
2047 */
2048SELMR3DECL(void) SELMR3DumpDescriptor(VBOXDESC Desc, RTSEL Sel, const char *pszMsg)
2049{
2050 char szOutput[128];
2051 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2052 Log(("%s: %s\n", pszMsg, szOutput));
2053 NOREF(szOutput[0]);
2054}
2055
2056
2057/**
2058 * Display the shadow gdt.
2059 *
2060 * @param pVM VM Handle.
2061 * @param pHlp The info helpers.
2062 * @param pszArgs Arguments, ignored.
2063 */
2064static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2065{
2066 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtHC));
2067 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2068 {
2069 if (pVM->selm.s.paGdtHC[iGDT].Gen.u1Present)
2070 {
2071 char szOutput[128];
2072 selmR3FormatDescriptor(pVM->selm.s.paGdtHC[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2073 const char *psz = "";
2074 if (iGDT == ((unsigned)pVM->selm.s.SelCS >> X86_SEL_SHIFT))
2075 psz = " HyperCS";
2076 else if (iGDT == ((unsigned)pVM->selm.s.SelDS >> X86_SEL_SHIFT))
2077 psz = " HyperDS";
2078 else if (iGDT == ((unsigned)pVM->selm.s.SelCS64 >> X86_SEL_SHIFT))
2079 psz = " HyperCS64";
2080 else if (iGDT == ((unsigned)pVM->selm.s.SelTSS >> X86_SEL_SHIFT))
2081 psz = " HyperTSS";
2082 else if (iGDT == ((unsigned)pVM->selm.s.SelTSSTrap08 >> X86_SEL_SHIFT))
2083 psz = " HyperTSSTrap08";
2084 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2085 }
2086 }
2087}
2088
2089
2090/**
2091 * Display the guest gdt.
2092 *
2093 * @param pVM VM Handle.
2094 * @param pHlp The info helpers.
2095 * @param pszArgs Arguments, ignored.
2096 */
2097static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2098{
2099 VBOXGDTR GDTR;
2100 CPUMGetGuestGDTR(pVM, &GDTR);
2101 RTGCPTR pGDTGC = (RTGCPTR)GDTR.pGdt;
2102 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(VBOXDESC);
2103
2104 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", pGDTGC, GDTR.cbGdt);
2105 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, pGDTGC += sizeof(VBOXDESC))
2106 {
2107 VBOXDESC GDTE;
2108 int rc = PGMPhysReadGCPtr(pVM, &GDTE, pGDTGC, sizeof(GDTE));
2109 if (VBOX_SUCCESS(rc))
2110 {
2111 if (GDTE.Gen.u1Present)
2112 {
2113 char szOutput[128];
2114 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2115 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2116 }
2117 }
2118 else if (rc == VERR_PAGE_NOT_PRESENT)
2119 {
2120 if ((pGDTGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2121 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, pGDTGC);
2122 }
2123 else
2124 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, pGDTGC);
2125 }
2126}
2127
2128
2129/**
2130 * Display the shadow ldt.
2131 *
2132 * @param pVM VM Handle.
2133 * @param pHlp The info helpers.
2134 * @param pszArgs Arguments, ignored.
2135 */
2136static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2137{
2138 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2139 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
2140 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2141 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2142 {
2143 if (paLDT[iLDT].Gen.u1Present)
2144 {
2145 char szOutput[128];
2146 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2147 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2148 }
2149 }
2150}
2151
2152
2153/**
2154 * Display the guest ldt.
2155 *
2156 * @param pVM VM Handle.
2157 * @param pHlp The info helpers.
2158 * @param pszArgs Arguments, ignored.
2159 */
2160static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2161{
2162 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
2163 if (!(SelLdt & X86_SEL_MASK))
2164 {
2165 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2166 return;
2167 }
2168
2169 RTGCPTR pLdtGC;
2170 unsigned cbLdt;
2171 int rc = SELMGetLDTFromSel(pVM, SelLdt, &pLdtGC, &cbLdt);
2172 if (VBOX_FAILURE(rc))
2173 {
2174 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Vrc\n", SelLdt, rc);
2175 return;
2176 }
2177
2178 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, pLdtGC, cbLdt);
2179 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2180 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, pLdtGC += sizeof(VBOXDESC))
2181 {
2182 VBOXDESC LdtE;
2183 int rc = PGMPhysReadGCPtr(pVM, &LdtE, pLdtGC, sizeof(LdtE));
2184 if (VBOX_SUCCESS(rc))
2185 {
2186 if (LdtE.Gen.u1Present)
2187 {
2188 char szOutput[128];
2189 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2190 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2191 }
2192 }
2193 else if (rc == VERR_PAGE_NOT_PRESENT)
2194 {
2195 if ((pLdtGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2196 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, pLdtGC);
2197 }
2198 else
2199 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, pLdtGC);
2200 }
2201}
2202
2203
2204/**
2205 * Dumps the hypervisor GDT
2206 *
2207 * @param pVM VM handle.
2208 */
2209SELMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
2210{
2211 DBGFR3Info(pVM, "gdt", NULL, NULL);
2212}
2213
2214/**
2215 * Dumps the hypervisor LDT
2216 *
2217 * @param pVM VM handle.
2218 */
2219SELMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
2220{
2221 DBGFR3Info(pVM, "ldt", NULL, NULL);
2222}
2223
2224/**
2225 * Dumps the guest GDT
2226 *
2227 * @param pVM VM handle.
2228 */
2229SELMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
2230{
2231 DBGFR3Info(pVM, "gdtguest", NULL, NULL);
2232}
2233
2234/**
2235 * Dumps the guest LDT
2236 *
2237 * @param pVM VM handle.
2238 */
2239SELMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
2240{
2241 DBGFR3Info(pVM, "ldtguest", NULL, NULL);
2242}
2243
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette