VirtualBox

source: vbox/trunk/src/VBox/VMM/SELM.cpp@ 19463

Last change on this file since 19463 was 19463, checked in by vboxsync, 15 years ago

dbgfsel.h,VMM,DBGC: Named the union containing the raw data to shut up gcc warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 101.4 KB
1/* $Id: SELM.cpp 19463 2009-05-06 20:30:57Z vboxsync $ */
2/** @file
3 * SELM - The Selector Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_selm SELM - The Selector Manager
23 *
24 * SELM takes care of GDT, LDT and TSS shadowing in raw mode, and the injection
25 * of a few hyper selectors for the raw-mode context. In hardware-assisted
26 * virtualization mode its only task is to decode entries in the guest GDT or
27 * LDT once in a while.
28 *
29 * @see grp_selm
30 *
31 *
32 * @section seg_selm_shadowing Shadowing
33 *
34 * SELMR3UpdateFromCPUM() and SELMR3SyncTSS() do the bulk of the synchronization
35 * work. The three structures (GDT, LDT, TSS) are all shadowed wholesale at the
36 * moment. The idea is to do it in a more on-demand fashion when we get time.
37 * There are also a whole bunch of issues with the current synchronization of all
38 * three tables, see notes and todos in the code.
39 *
40 * When the guest makes changes to the GDT we will try to update the shadow copy
41 * without involving SELMR3UpdateFromCPUM(), see selmGCSyncGDTEntry().
42 *
43 * When the guest makes LDT changes we'll trigger a full resync of the LDT
44 * (SELMR3UpdateFromCPUM()), which, needless to say, isn't optimal.
45 *
46 * The TSS shadowing is limited to the fields we need to care about, namely SS0
47 * and ESP0. The Patch Manager makes use of these. We monitor updates to the
48 * guest TSS and will try to keep our SS0 and ESP0 copies up to date this way
49 * rather than go the SELMR3SyncTSS() route.
50 *
51 * When in raw mode SELM also injects a few extra GDT selectors which are used
52 * by the raw-mode (hyper) context. These start their life at the high end of
53 * the table and will be relocated when the guest tries to make use of them...
54 * Well, that was the idea at least, only the code isn't quite there yet, which
55 * is why we have trouble with guests that actually have a full-sized GDT.
56 *
57 * So, the summary of the current GDT, LDT and TSS shadowing is that there is a
58 * lot of relatively simple and enjoyable work to be done, see @bugref{3267}.
59 *
60 */
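/* A minimal sketch (not verbatim from this file) of the per-descriptor
 * adjustment described above and performed by SELMR3UpdateFromCPUM() below:
 * copy the guest descriptor, force the accessed bit (the shadow GDT is
 * write-monitored, so letting the CPU set it would trap), and squeeze DPL 0
 * code/data segments into DPL 1:
 *
 * @code
 *      X86DESC Shadow = Guest;
 *      Shadow.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *      if (    Shadow.Gen.u1DescType
 *          &&  Shadow.Gen.u2Dpl == 0
 *          &&     (Shadow.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 *              != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 *          Shadow.Gen.u2Dpl = 1;
 * @endcode
 */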
61
62/*******************************************************************************
63* Header Files *
64*******************************************************************************/
65#define LOG_GROUP LOG_GROUP_SELM
66#include <VBox/selm.h>
67#include <VBox/cpum.h>
68#include <VBox/stam.h>
69#include <VBox/mm.h>
70#include <VBox/ssm.h>
71#include <VBox/pgm.h>
72#include <VBox/trpm.h>
73#include <VBox/dbgf.h>
74#include "SELMInternal.h"
75#include <VBox/vm.h>
76#include <VBox/err.h>
77#include <VBox/param.h>
78
79#include <iprt/assert.h>
80#include <VBox/log.h>
81#include <iprt/asm.h>
82#include <iprt/string.h>
83#include <iprt/thread.h>
84
85
86
87/**
88 * Enable or disable tracking of Guest's GDT/LDT/TSS.
89 * @{
90 */
91#define SELM_TRACK_GUEST_GDT_CHANGES
92#define SELM_TRACK_GUEST_LDT_CHANGES
93#define SELM_TRACK_GUEST_TSS_CHANGES
94/** @} */
95
96/**
97 * Enable or disable tracking of Shadow GDT/LDT/TSS.
98 * @{
99 */
100#define SELM_TRACK_SHADOW_GDT_CHANGES
101#define SELM_TRACK_SHADOW_LDT_CHANGES
102#define SELM_TRACK_SHADOW_TSS_CHANGES
103/** @} */
104
105
106/** SELM saved state version. */
107#define SELM_SAVED_STATE_VERSION 5
108
109
110/*******************************************************************************
111* Internal Functions *
112*******************************************************************************/
113static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
114static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
115static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
116static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
117static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
118static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
119static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
120static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
121static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
122static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
123//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
124//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
125
126
127
128/**
129 * Initializes the SELM.
130 *
131 * @returns VBox status code.
132 * @param pVM The VM to operate on.
133 */
134VMMR3DECL(int) SELMR3Init(PVM pVM)
135{
136 LogFlow(("SELMR3Init\n"));
137
138 /*
139 * Assert alignment and sizes.
140 * (The TSS block requires contiguous backing.)
141 */
142 AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
143 AssertCompileMemberAlignment(VM, selm.s, 32); AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
144#if 0 /* doesn't work */
145 AssertCompile((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
146 AssertCompile((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
147#endif
148 AssertRelease((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
149 AssertRelease((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
150 AssertRelease(sizeof(pVM->selm.s.Tss.IntRedirBitmap) == 0x20);
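    /* (0x20 bytes = 256 bits: one VME interrupt redirection bit per vector.) */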
151
152 /*
153 * Init the structure.
154 */
155 pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
156 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = (SELM_GDT_ELEMENTS - 0x1) << 3;
157 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = (SELM_GDT_ELEMENTS - 0x2) << 3;
158 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = (SELM_GDT_ELEMENTS - 0x3) << 3;
159 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = (SELM_GDT_ELEMENTS - 0x4) << 3;
160 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;
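    /* Note: a selector value is the descriptor index shifted left by 3 (X86_SEL_SHIFT)
       with the TI and RPL bits zero here, so the five hypervisor selectors above
       occupy the five topmost entries of the shadow GDT. */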
161
162 /*
163 * Allocate GDT table.
164 */
165 int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
166 PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3);
167 AssertRCReturn(rc, rc);
168
169 /*
170 * Allocate LDT area.
171 */
172 rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
173 AssertRCReturn(rc, rc);
174
175 /*
176 * Init the control variables for tracking guest and shadow GDT, LDT and TSS changes.
177 */
178 pVM->selm.s.cbEffGuestGdtLimit = 0;
179 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
180 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
181 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
182
183 pVM->selm.s.paGdtRC = NIL_RTRCPTR; /* Must be set in SELMR3Relocate because of monitoring. */
184 pVM->selm.s.pvLdtRC = RTRCPTR_MAX;
185 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
186 pVM->selm.s.GCSelTss = RTSEL_MAX;
187
188 pVM->selm.s.fDisableMonitoring = false;
189 pVM->selm.s.fSyncTSSRing0Stack = false;
190
191 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap.
192 * It is outside the TSS on purpose; the CPU will not check it for I/O operations. */
193 pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
194 /* bit set to 1 means no redirection */
195 memset(pVM->selm.s.Tss.IntRedirBitmap, 0xff, sizeof(pVM->selm.s.Tss.IntRedirBitmap));
196
197 /*
198 * Register the saved state data unit.
199 */
200 rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
201 NULL, selmR3Save, NULL,
202 NULL, selmR3Load, selmR3LoadDone);
203 if (RT_FAILURE(rc))
204 return rc;
205
206 /*
207 * Statistics.
208 */
209 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
210 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
211 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of detected writes to the Guest LDT.");
212 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
213 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
214 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
215 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
216 STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
217 STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
218
219 STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
220 STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had to find free hypervisor selectors.");
221
222 /*
223 * Default action when entering raw mode for the first time
224 */
225 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
226 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
227 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
228 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
229
230 /*
231 * Register info handlers.
232 */
233 DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
234 DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
235 DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
236 DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
237 //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
238 //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);
239
240 return rc;
241}
242
243
244/**
245 * Finalizes HMA page attributes.
246 *
247 * @returns VBox status code.
248 * @param pVM The VM handle.
249 */
250VMMR3DECL(int) SELMR3InitFinalize(PVM pVM)
251{
252 /** @cfgm{/DoubleFault,bool,false}
253 * Enables catching of double faults in the raw-mode context VMM code. This can
254 * be used when triple faults or hangs occur and one suspects an unhandled
255 * double fault. This is not enabled by default because it means making the
256 * hyper selectors writeable for all supervisor code, including the guest's.
257 * The double fault is a task switch and thus requires write access to the GDT
258 * entry of the TSS (to set it busy), to the old TSS (to store state), and to
259 * the Trap 8 TSS for the back link.
260 */
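    /* Illustrative host-side usage, assuming the usual VBoxInternal/ extradata
       to CFGM mapping applies:
            VBoxManage setextradata <vmname> VBoxInternal/DoubleFault 1 */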
261 bool f;
262#if defined(DEBUG_bird)
263 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true);
264#else
265 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false);
266#endif
267 AssertLogRelRCReturn(rc, rc);
268 if (f)
269 {
270 PX86DESC paGdt = pVM->selm.s.paGdtR3;
271 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
272 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
273 AssertRC(rc);
274 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3]), sizeof(paGdt[0]),
275 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
276 AssertRC(rc);
277 rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]),
278 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
279 AssertRC(rc);
280 rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]),
281 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
282 AssertRC(rc);
283 }
284 return VINF_SUCCESS;
285}
286
287
288/**
289 * Set up the hypervisor GDT selectors in our shadow table.
290 *
291 * @param pVM The VM handle.
292 */
293static void selmR3SetupHyperGDTSelectors(PVM pVM)
294{
295 PX86DESC paGdt = pVM->selm.s.paGdtR3;
296
297 /*
298 * Set up global code and data descriptors for use in the guest context.
299 * Both are wide open (base 0, limit 4GB)
300 */
301 PX86DESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> 3];
302 pDesc->Gen.u16LimitLow = 0xffff;
303 pDesc->Gen.u4LimitHigh = 0xf;
304 pDesc->Gen.u16BaseLow = 0;
305 pDesc->Gen.u8BaseHigh1 = 0;
306 pDesc->Gen.u8BaseHigh2 = 0;
307 pDesc->Gen.u4Type = X86_SEL_TYPE_ER_ACC;
308 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
309 pDesc->Gen.u2Dpl = 0; /* supervisor */
310 pDesc->Gen.u1Present = 1;
311 pDesc->Gen.u1Available = 0;
312 pDesc->Gen.u1Long = 0;
313 pDesc->Gen.u1DefBig = 1; /* def 32 bit */
314 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
315
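    /* With G=1 the effective limit is (0xfffff + 1) * 4 KiB - 1 = 4 GiB - 1, i.e. a flat segment. */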
316 /* data */
317 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> 3];
318 pDesc->Gen.u16LimitLow = 0xffff;
319 pDesc->Gen.u4LimitHigh = 0xf;
320 pDesc->Gen.u16BaseLow = 0;
321 pDesc->Gen.u8BaseHigh1 = 0;
322 pDesc->Gen.u8BaseHigh2 = 0;
323 pDesc->Gen.u4Type = X86_SEL_TYPE_RW_ACC;
324 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
325 pDesc->Gen.u2Dpl = 0; /* supervisor */
326 pDesc->Gen.u1Present = 1;
327 pDesc->Gen.u1Available = 0;
328 pDesc->Gen.u1Long = 0;
329 pDesc->Gen.u1DefBig = 1; /* big */
330 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
331
332 /* 64-bit mode code (& data?) */
333 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> 3];
334 pDesc->Gen.u16LimitLow = 0xffff;
335 pDesc->Gen.u4LimitHigh = 0xf;
336 pDesc->Gen.u16BaseLow = 0;
337 pDesc->Gen.u8BaseHigh1 = 0;
338 pDesc->Gen.u8BaseHigh2 = 0;
339 pDesc->Gen.u4Type = X86_SEL_TYPE_ER_ACC;
340 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
341 pDesc->Gen.u2Dpl = 0; /* supervisor */
342 pDesc->Gen.u1Present = 1;
343 pDesc->Gen.u1Available = 0;
344 pDesc->Gen.u1Long = 1; /* The Long (L) attribute bit. */
345 pDesc->Gen.u1DefBig = 0; /* With L=1 this must be 0. */
346 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
347
348 /*
349 * TSS descriptor
350 */
351 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
352 RTRCPTR RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
353 pDesc->Gen.u16BaseLow = RT_LOWORD(RCPtrTSS);
354 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(RCPtrTSS);
355 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(RCPtrTSS);
356 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
357 pDesc->Gen.u4LimitHigh = 0;
358 pDesc->Gen.u4Type = X86_SEL_TYPE_SYS_386_TSS_AVAIL;
359 pDesc->Gen.u1DescType = 0; /* system */
360 pDesc->Gen.u2Dpl = 0; /* supervisor */
361 pDesc->Gen.u1Present = 1;
362 pDesc->Gen.u1Available = 0;
363 pDesc->Gen.u1Long = 0;
364 pDesc->Gen.u1DefBig = 0;
365 pDesc->Gen.u1Granularity = 0; /* byte limit */
366
367 /*
368 * TSS descriptor for trap 08
369 */
370 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3];
371 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
372 pDesc->Gen.u4LimitHigh = 0;
373 RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.TssTrap08);
374 pDesc->Gen.u16BaseLow = RT_LOWORD(RCPtrTSS);
375 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(RCPtrTSS);
376 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(RCPtrTSS);
377 pDesc->Gen.u4Type = X86_SEL_TYPE_SYS_386_TSS_AVAIL;
378 pDesc->Gen.u1DescType = 0; /* system */
379 pDesc->Gen.u2Dpl = 0; /* supervisor */
380 pDesc->Gen.u1Present = 1;
381 pDesc->Gen.u1Available = 0;
382 pDesc->Gen.u1Long = 0;
383 pDesc->Gen.u1DefBig = 0;
384 pDesc->Gen.u1Granularity = 0; /* byte limit */
385}
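
/* For reference, the 32-bit base address set above is scattered over three
 * descriptor fields; reassembling it looks roughly like this sketch (the
 * X86DESC_BASE() macro used elsewhere in this file does the equivalent):
 *
 * @code
 *      uint32_t u32Base = pDesc->Gen.u16BaseLow
 *                       | ((uint32_t)pDesc->Gen.u8BaseHigh1 << 16)
 *                       | ((uint32_t)pDesc->Gen.u8BaseHigh2 << 24);
 * @endcode
 */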
386
387/**
388 * Applies relocations to data and code managed by this
389 * component. This function will be called at init and
390 * whenever the VMM needs to relocate itself inside the GC.
391 *
392 * @param pVM The VM.
393 */
394VMMR3DECL(void) SELMR3Relocate(PVM pVM)
395{
396 PX86DESC paGdt = pVM->selm.s.paGdtR3;
397 LogFlow(("SELMR3Relocate\n"));
398
399 for (unsigned i = 0; i < pVM->cCPUs; i++)
400 {
401 PVMCPU pVCpu = &pVM->aCpus[i];
402
403 /*
404 * Update GDTR and selector.
405 */
406 CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
407
408 /** @todo selector relocations should be a separate operation? */
409 CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
410 CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
411 CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
412 CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
413 CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
414 }
415
416 selmR3SetupHyperGDTSelectors(pVM);
417
418/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
419/** @todo PGM knows the proper CR3 values these days, not CPUM. */
420 /*
421 * Update the TSSes.
422 */
423 /* Only applies to raw mode which supports only 1 VCPU */
424 PVMCPU pVCpu = &pVM->aCpus[0];
425
426 /* Current TSS */
427 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
428 pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
429 pVM->selm.s.Tss.esp0 = VMMGetStackRC(pVM);
430 pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
431 pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
432 pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
433 pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
434
435 /* trap 08 */
436 pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give us better survival chances. */
437 pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
438 pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
439 pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
440 pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
441 pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
442 pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
443 pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
444 pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
445 pVM->selm.s.TssTrap08.fs = 0;
446 pVM->selm.s.TssTrap08.gs = 0;
447 pVM->selm.s.TssTrap08.selLdt = 0;
448 pVM->selm.s.TssTrap08.eflags = 0x2; /* bit 1 is reserved (always 1); all other flags cleared */
449 pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
450 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
451 pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
452 pVM->selm.s.TssTrap08.edx = VM_RC_ADDR(pVM, pVM); /* setup edx VM address. */
453 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx; /* note: overrides the Tss address assigned to edi above. */
454 pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
455 pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
456 /* TRPM will be updating the eip */
457
458 if (!pVM->selm.s.fDisableMonitoring)
459 {
460 /*
461 * Update shadow GDT/LDT/TSS write access handlers.
462 */
463 int rc;
464#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
465 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
466 {
467 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC);
468 AssertRC(rc);
469 }
470 pVM->selm.s.paGdtRC = MMHyperR3ToRC(pVM, paGdt);
471 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtRC,
472 pVM->selm.s.paGdtRC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
473 0, 0, "selmRCShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
474 AssertRC(rc);
475#endif
476#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
477 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX)
478 {
479 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC);
480 AssertRC(rc);
481 }
482 pVM->selm.s.pvMonShwTssRC = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
483 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvMonShwTssRC,
484 pVM->selm.s.pvMonShwTssRC + sizeof(pVM->selm.s.Tss) - 1,
485 0, 0, "selmRCShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
486 AssertRC(rc);
487#endif
488
489 /*
490 * Update the GC LDT region handler and address.
491 */
492#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
493 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX)
494 {
495 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC);
496 AssertRC(rc);
497 }
498#endif
499 pVM->selm.s.pvLdtRC = MMHyperR3ToRC(pVM, pVM->selm.s.pvLdtR3);
500#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
501 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvLdtRC,
502 pVM->selm.s.pvLdtRC + _64K + PAGE_SIZE - 1,
503 0, 0, "selmRCShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
504 AssertRC(rc);
505#endif
506 }
507}
508
509
510/**
511 * Terminates the SELM.
512 *
513 * Termination means cleaning up and freeing all resources;
514 * the VM itself is at this point powered off or suspended.
515 *
516 * @returns VBox status code.
517 * @param pVM The VM to operate on.
518 */
519VMMR3DECL(int) SELMR3Term(PVM pVM)
520{
521 return VINF_SUCCESS;
522}
523
524
525/**
526 * The VM is being reset.
527 *
528 * For the SELM component this means that any GDT/LDT/TSS monitors
529 * need to be removed.
530 *
531 * @param pVM VM handle.
532 */
533VMMR3DECL(void) SELMR3Reset(PVM pVM)
534{
535 LogFlow(("SELMR3Reset:\n"));
536 VM_ASSERT_EMT(pVM);
537
538 /*
539 * Uninstall guest GDT/LDT/TSS write access handlers.
540 */
541 int rc;
542#ifdef SELM_TRACK_GUEST_GDT_CHANGES
543 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
544 {
545 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
546 AssertRC(rc);
547 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
548 pVM->selm.s.GuestGdtr.cbGdt = 0;
549 }
550 pVM->selm.s.fGDTRangeRegistered = false;
551#endif
552#ifdef SELM_TRACK_GUEST_LDT_CHANGES
553 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
554 {
555 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
556 AssertRC(rc);
557 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
558 }
559#endif
560#ifdef SELM_TRACK_GUEST_TSS_CHANGES
561 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
562 {
563 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
564 AssertRC(rc);
565 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
566 pVM->selm.s.GCSelTss = RTSEL_MAX;
567 }
568#endif
569
570 /*
571 * Re-initialize other members.
572 */
573 pVM->selm.s.cbLdtLimit = 0;
574 pVM->selm.s.offLdtHyper = 0;
575 pVM->selm.s.cbMonitoredGuestTss = 0;
576
577 pVM->selm.s.fSyncTSSRing0Stack = false;
578
579 /*
580 * Default action when entering raw mode for the first time
581 */
582 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
583 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
584 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
585 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
586}
587
588/**
589 * Disable GDT/LDT/TSS monitoring and syncing
590 *
591 * @param pVM The VM to operate on.
592 */
593VMMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
594{
595 /*
596 * Uninstall guest GDT/LDT/TSS write access handlers.
597 */
598 int rc;
599#ifdef SELM_TRACK_GUEST_GDT_CHANGES
600 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
601 {
602 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
603 AssertRC(rc);
604 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
605 pVM->selm.s.GuestGdtr.cbGdt = 0;
606 }
607 pVM->selm.s.fGDTRangeRegistered = false;
608#endif
609#ifdef SELM_TRACK_GUEST_LDT_CHANGES
610 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
611 {
612 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
613 AssertRC(rc);
614 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
615 }
616#endif
617#ifdef SELM_TRACK_GUEST_TSS_CHANGES
618 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
619 {
620 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
621 AssertRC(rc);
622 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
623 pVM->selm.s.GCSelTss = RTSEL_MAX;
624 }
625#endif
626
627 /*
628 * Unregister shadow GDT/LDT/TSS write access handlers.
629 */
630#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
631 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
632 {
633 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC);
634 AssertRC(rc);
635 pVM->selm.s.paGdtRC = NIL_RTRCPTR;
636 }
637#endif
638#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
639 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX)
640 {
641 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC);
642 AssertRC(rc);
643 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
644 }
645#endif
646#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
647 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX)
648 {
649 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC);
650 AssertRC(rc);
651 pVM->selm.s.pvLdtRC = RTRCPTR_MAX;
652 }
653#endif
654
655 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
656 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
657 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
658 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
659
660 pVM->selm.s.fDisableMonitoring = true;
661}
662
663
664/**
665 * Execute state save operation.
666 *
667 * @returns VBox status code.
668 * @param pVM VM Handle.
669 * @param pSSM SSM operation handle.
670 */
671static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
672{
673 LogFlow(("selmR3Save:\n"));
674
675 /*
676 * Save the basic bits - fortunately all the other things can be resynced on load.
677 */
678 PSELM pSelm = &pVM->selm.s;
679
680 SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
681 SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
682 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS]);
683 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]);
684 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]);
685 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); /* reserved for DS64. */
686 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS]);
687 return SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]);
688}
689
690
691/**
692 * Execute state load operation.
693 *
694 * @returns VBox status code.
695 * @param pVM VM Handle.
696 * @param pSSM SSM operation handle.
697 * @param u32Version Data layout version.
698 */
699static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
700{
701 LogFlow(("selmR3Load:\n"));
702
703 /*
704 * Validate version.
705 */
706 if (u32Version != SELM_SAVED_STATE_VERSION)
707 {
708 AssertMsgFailed(("selmR3Load: Invalid version u32Version=%d!\n", u32Version));
709 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
710 }
711
712 /*
713 * Do a reset.
714 */
715 SELMR3Reset(pVM);
716
717 /* Get the monitoring flag. */
718 SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);
719
720 /* Get the TSS state flag. */
721 SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);
722
723 /*
724 * Get the selectors.
725 */
726 RTSEL SelCS;
727 SSMR3GetSel(pSSM, &SelCS);
728 RTSEL SelDS;
729 SSMR3GetSel(pSSM, &SelDS);
730 RTSEL SelCS64;
731 SSMR3GetSel(pSSM, &SelCS64);
732 RTSEL SelDS64;
733 SSMR3GetSel(pSSM, &SelDS64);
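    /* SelDS64 is a placeholder: selmR3Save() writes the CS64 selector twice
       ("reserved for DS64"), so it is read here only to keep the stream in step
       and is deliberately not stored below. */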
734 RTSEL SelTSS;
735 SSMR3GetSel(pSSM, &SelTSS);
736 RTSEL SelTSSTrap08;
737 SSMR3GetSel(pSSM, &SelTSSTrap08);
738
739 /* Copy the selectors; they will be checked during relocation. */
740 PSELM pSelm = &pVM->selm.s;
741 pSelm->aHyperSel[SELM_HYPER_SEL_CS] = SelCS;
742 pSelm->aHyperSel[SELM_HYPER_SEL_DS] = SelDS;
743 pSelm->aHyperSel[SELM_HYPER_SEL_CS64] = SelCS64;
744 pSelm->aHyperSel[SELM_HYPER_SEL_TSS] = SelTSS;
745 pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SelTSSTrap08;
746
747 return VINF_SUCCESS;
748}
749
750
751/**
752 * Sync the GDT, LDT and TSS after loading the state.
753 *
754 * Just to play safe, we set the FFs to force syncing before
755 * executing GC code.
756 *
757 * @returns VBox status code.
758 * @param pVM VM Handle.
759 * @param pSSM SSM operation handle.
760 */
761static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
762{
763 PVMCPU pVCpu = VMMGetCpu(pVM);
764
765 LogFlow(("selmR3LoadDone:\n"));
766
767 /*
768 * Don't do anything if it's a load failure.
769 */
770 int rc = SSMR3HandleGetStatus(pSSM);
771 if (RT_FAILURE(rc))
772 return VINF_SUCCESS;
773
774 /*
775 * Do the syncing if we're in protected mode.
776 */
777 if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
778 {
779 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
780 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
781 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
782 SELMR3UpdateFromCPUM(pVM, pVCpu);
783 }
784
785 /*
786 * Flag everything for resync on next raw mode entry.
787 */
788 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
789 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
790 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
791
792 return VINF_SUCCESS;
793}
794
795
796/**
797 * Updates the Guest GDT & LDT virtualization based on current CPU state.
798 *
799 * @returns VBox status code.
800 * @param pVM The VM to operate on.
801 * @param pVCpu The VMCPU to operate on.
802 */
803VMMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
804{
805 int rc = VINF_SUCCESS;
806
807 if (pVM->selm.s.fDisableMonitoring)
808 {
809 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
810 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
811 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
812
813 return VINF_SUCCESS;
814 }
815
816 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
817
818 /*
819 * GDT sync
820 */
821 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
822 {
823 /*
824 * Always assume the best
825 */
826 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
827
828 /* If the GDT was changed, then make sure the LDT is checked too */
829 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
830 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
831 /* Same goes for the TSS selector */
832 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
833
834 /*
835 * Get the GDTR and check if there is anything to do (there usually is).
836 */
837 VBOXGDTR GDTR;
838 CPUMGetGuestGDTR(pVCpu, &GDTR);
839 if (GDTR.cbGdt < sizeof(X86DESC))
840 {
841 Log(("No GDT entries...\n"));
842 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
843 return VINF_SUCCESS;
844 }
845
846 /*
847 * Read the Guest GDT.
848 * ASSUMES that the entire GDT is in memory.
849 */
850 RTUINT cbEffLimit = GDTR.cbGdt;
851 PX86DESC pGDTE = &pVM->selm.s.paGdtR3[1];
852 rc = PGMPhysSimpleReadGCPtr(pVCpu, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC));
853 if (RT_FAILURE(rc))
854 {
855 /*
856 * Read it page by page.
857 *
858 * Keep track of the last valid page and delay memsets and
859 * adjust cbEffLimit to reflect the effective size. The latter
860 * is something we do in the belief that the guest will probably
861 * never actually commit the last page, thus allowing us to keep
862 * our selectors in the high end of the GDT.
863 */
864 RTUINT cbLeft = cbEffLimit + 1 - sizeof(X86DESC);
865 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(X86DESC);
866 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtR3[1];
867 uint8_t *pu8DstInvalid = pu8Dst;
868
869 while (cbLeft)
870 {
871 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
872 cb = RT_MIN(cb, cbLeft);
873 rc = PGMPhysSimpleReadGCPtr(pVCpu, pu8Dst, GCPtrSrc, cb);
874 if (RT_SUCCESS(rc))
875 {
876 if (pu8DstInvalid != pu8Dst)
877 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
878 GCPtrSrc += cb;
879 pu8Dst += cb;
880 pu8DstInvalid = pu8Dst;
881 }
882 else if ( rc == VERR_PAGE_NOT_PRESENT
883 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
884 {
885 GCPtrSrc += cb;
886 pu8Dst += cb;
887 }
888 else
889 {
890 AssertReleaseMsgFailed(("Couldn't read GDT at %016RX64, rc=%Rrc!\n", GDTR.pGdt, rc));
891 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
892 return VERR_NOT_IMPLEMENTED;
893 }
894 cbLeft -= cb;
895 }
896
897 /* any invalid pages at the end? */
898 if (pu8DstInvalid != pu8Dst)
899 {
900 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtR3 - 1;
901 /* If any GDTEs were invalidated, zero them. */
902 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
903 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
904 }
905
906 /* keep track of the effective limit. */
907 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
908 {
909 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
910 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
911 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
912 }
913 }
914
915 /*
916 * Check if the Guest GDT intrudes on our GDT entries.
917 */
918 /** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
919 RTSEL aHyperSel[SELM_HYPER_SEL_MAX];
920 if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
921 {
922 PX86DESC pGDTEStart = pVM->selm.s.paGdtR3;
923 PX86DESC pGDTE = (PX86DESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(X86DESC));
924 int iGDT = 0;
925
926 Log(("Internal SELM GDT conflict: use non-present entries\n"));
927 STAM_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
928 while (pGDTE > pGDTEStart)
929 {
930 /* We can reuse non-present entries */
931 if (!pGDTE->Gen.u1Present)
932 {
933 aHyperSel[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtR3) / sizeof(X86DESC);
934 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT;
935 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT]));
936 iGDT++;
937 if (iGDT >= SELM_HYPER_SEL_MAX)
938 break;
939 }
940
941 pGDTE--;
942 }
943 if (iGDT != SELM_HYPER_SEL_MAX)
944 {
945 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
946 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
947 return VERR_NOT_IMPLEMENTED;
948 }
949 }
950 else
951 {
952 aHyperSel[SELM_HYPER_SEL_CS] = SELM_HYPER_DEFAULT_SEL_CS;
953 aHyperSel[SELM_HYPER_SEL_DS] = SELM_HYPER_DEFAULT_SEL_DS;
954 aHyperSel[SELM_HYPER_SEL_CS64] = SELM_HYPER_DEFAULT_SEL_CS64;
955 aHyperSel[SELM_HYPER_SEL_TSS] = SELM_HYPER_DEFAULT_SEL_TSS;
956 aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
957 }
958
959 /*
960 * Work thru the copied GDT entries adjusting them for correct virtualization.
961 */
962 PX86DESC pGDTEEnd = (PX86DESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(X86DESC));
963 while (pGDTE < pGDTEEnd)
964 {
965 if (pGDTE->Gen.u1Present)
966 {
967 /*
968 * Code and data selectors are generally 1:1, with the
969 * 'little' adjustment we do for DPL 0 selectors.
970 */
971 if (pGDTE->Gen.u1DescType)
972 {
973 /*
974 * Hack for A-bit against Trap E (#PF) on read-only GDT.
975 */
976 /** @todo Fix this by loading ds and cs before turning off WP. */
977 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
978
979 /*
980 * All DPL 0 code and data segments are squeezed into DPL 1.
981 *
982 * We're skipping conforming segments here because those
983 * cannot give us any trouble.
984 */
985 if ( pGDTE->Gen.u2Dpl == 0
986 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
987 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
988 pGDTE->Gen.u2Dpl = 1;
989 }
990 else
991 {
992 /*
993 * System type selectors are marked not present.
994 * Recompiler or special handling is required for these.
995 */
996 /** @todo what about interrupt gates and rawr0? */
997 pGDTE->Gen.u1Present = 0;
998 }
999 }
1000
1001 /* Next GDT entry. */
1002 pGDTE++;
1003 }
1004
1005 /*
1006 * Check if our hypervisor selectors were changed.
1007 */
1008 if ( aHyperSel[SELM_HYPER_SEL_CS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]
1009 || aHyperSel[SELM_HYPER_SEL_DS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]
1010 || aHyperSel[SELM_HYPER_SEL_CS64] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]
1011 || aHyperSel[SELM_HYPER_SEL_TSS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]
1012 || aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08])
1013 {
1014 /* Reinitialize our hypervisor GDTs */
1015 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = aHyperSel[SELM_HYPER_SEL_CS];
1016 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = aHyperSel[SELM_HYPER_SEL_DS];
1017 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = aHyperSel[SELM_HYPER_SEL_CS64];
1018 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = aHyperSel[SELM_HYPER_SEL_TSS];
1019 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1020
1021 STAM_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
1022
1023 /*
1024 * Do the relocation callbacks to let everyone update their hyper selector dependencies.
1025 * (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
1026 */
1027 VMR3Relocate(pVM, 0);
1028 }
1029 else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
1030 /* We overwrote all entries above, so we have to set them up again. */
1031 selmR3SetupHyperGDTSelectors(pVM);
1032
1033 /*
1034 * Adjust the cached GDT limit.
1035 * Any GDT entries which have been removed must be cleared.
1036 */
1037 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
1038 {
1039 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
1040 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
1041#ifndef SELM_TRACK_GUEST_GDT_CHANGES
1042 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
1043#endif
1044 }
1045
1046#ifdef SELM_TRACK_GUEST_GDT_CHANGES
1047 /*
1048 * Check if the guest's GDTR has changed.
1049 */
1050 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
1051 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1052 {
1053 Log(("SELMR3UpdateFromCPUM: Guest's GDT has changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
1054
1055 /*
1056 * [Re]Register write virtual handler for guest's GDT.
1057 */
1058 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
1059 {
1060 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
1061 AssertRC(rc);
1062 }
1063
1064 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
1065 0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0, "Guest GDT write access handler");
1066 if (RT_FAILURE(rc))
1067 return rc;
1068
1069 /* Update saved Guest GDTR. */
1070 pVM->selm.s.GuestGdtr = GDTR;
1071 pVM->selm.s.fGDTRangeRegistered = true;
1072 }
1073#endif
1074 }
1075
1076 /*
1077 * TSS sync
1078 */
1079 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1080 {
1081 SELMR3SyncTSS(pVM, pVCpu);
1082 }
1083
1084 /*
1085 * LDT sync
1086 */
1087 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
1088 {
1089 /*
1090 * Always assume the best
1091 */
1092 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
1093
1094 /*
1095 * LDT handling is done similarly to the GDT handling with a shadow
1096 * array. However, since the LDT is expected to be swappable (at least
1097 * some ancient OSes make it swappable) it must be floating and
1098 * synced on a per-page basis.
1099 *
1100 * Eventually we will change this to be fully on demand. Meaning that
1101 * we will only sync pages containing LDT selectors actually used and
1102 * let the #PF handler lazily sync pages as they are used.
1103 * (This applies to GDT too, when we start making OS/2 fast.)
1104 */
1105
1106 /*
1107 * First, determine the current LDT selector.
1108 */
1109 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
1110 if ((SelLdt & X86_SEL_MASK) == 0)
1111 {
1112 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1113 CPUMSetHyperLDTR(pVCpu, 0);
1114#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1115 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1116 {
1117 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1118 AssertRC(rc);
1119 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1120 }
1121#endif
1122 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1123 return VINF_SUCCESS;
1124 }
1125
1126 /*
1127 * Get the LDT descriptor.
1128 */
1129 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
1130 RTGCPTR GCPtrLdt = X86DESC_BASE(*pDesc);
1131 unsigned cbLdt = X86DESC_LIMIT(*pDesc);
1132 if (pDesc->Gen.u1Granularity)
1133 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
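    /* (With G=1 a stored limit of e.g. 0x3 expands to (0x3 << 12) | 0xfff = 0x3fff, i.e. 16 KiB - 1.) */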
1134
1135 /*
1136 * Validate it.
1137 */
1138 if ( !cbLdt
1139 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1140 || pDesc->Gen.u1DescType
1141 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1142 {
1143 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1144
1145 /* cbLdt > 0:
1146 * This is quite impossible, so we do as most people do when faced with
1147 * the impossible: we simply ignore it.
1148 */
1149 CPUMSetHyperLDTR(pVCpu, 0);
1150#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1151 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1152 {
1153 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1154 AssertRC(rc);
1155 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1156 }
1157#endif
1158 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1159 return VINF_SUCCESS;
1160 }
1161 /** @todo check what Intel does about odd limits. */
1162 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1163
1164 /*
1165 * Use the cached guest LDT address if the descriptor has already been modified (see below)
1166 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1167 */
1168 if (MMHyperIsInsideArea(pVM, GCPtrLdt))
1169 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1170
1171
1172#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1173 /** @todo Handle only present LDT segments. */
1174 // if (pDesc->Gen.u1Present)
1175 {
1176 /*
1177 * Check if Guest's LDT address/limit is changed.
1178 */
1179 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1180 || cbLdt != pVM->selm.s.cbLdtLimit)
1181 {
1182 Log(("SELMR3UpdateFromCPUM: Guest LDT changed from %RGv:%04x to %RGv:%04x. (GDTR=%016RX64:%04x)\n",
1183 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1184
1185 /*
1186 * [Re]Register write virtual handler for guest's LDT.
1187 * In the event of the LDT overlapping something else, don't install it; just assume it's being updated.
1188 */
1189 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1190 {
1191 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1192 AssertRC(rc);
1193 }
1194#ifdef DEBUG
1195 if (!pDesc->Gen.u1Present)
1196 Log(("LDT selector marked not present!!\n"));
1197#endif
1198 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1199 0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1200 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1201 {
1202 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1203 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1204 Log(("WARNING: Guest LDT (%RGv:%04x) conflicted with existing access range!! Assuming the LDT is being updated. (GDTR=%016RX64:%04x)\n",
1205 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1206 }
1207 else if (RT_SUCCESS(rc))
1208 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1209 else
1210 {
1211 CPUMSetHyperLDTR(pVCpu, 0);
1212 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1213 return rc;
1214 }
1215
1216 pVM->selm.s.cbLdtLimit = cbLdt;
1217 }
1218 }
1219#else
1220 pVM->selm.s.cbLdtLimit = cbLdt;
1221#endif
1222
1223 /*
1224 * Calc Shadow LDT base.
1225 */
1226 unsigned off;
1227 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1228 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.pvLdtRC + off);
1229 PX86DESC pShadowLDT = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1230
1231 /*
1232 * Enable the LDT selector in the shadow GDT.
1233 */
1234 pDesc->Gen.u1Present = 1;
1235 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1236 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1237 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1238 pDesc->Gen.u1Available = 0;
1239 pDesc->Gen.u1Long = 0;
1240 if (cbLdt > 0xffff)
1241 {
1242 cbLdt = 0xffff;
1243 pDesc->Gen.u4LimitHigh = 0;
1244 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1245 }
1246
1247 /*
1248 * Set Hyper LDTR and notify TRPM.
1249 */
1250 CPUMSetHyperLDTR(pVCpu, SelLdt);
1251
1252 /*
1253 * Loop synchronising the LDT page by page.
1254 */
1255 /** @todo investigate how Intel handles various operations on half-present cross-page entries. */
1256 off = GCPtrLdt & (sizeof(X86DESC) - 1);
1257 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1258
1259 /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
1260 unsigned cbLeft = cbLdt + 1;
1261 PX86DESC pLDTE = pShadowLDT;
1262 while (cbLeft)
1263 {
1264 /*
1265 * Read a chunk.
1266 */
1267 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1268 if (cbChunk > cbLeft)
1269 cbChunk = cbLeft;
1270 rc = PGMPhysSimpleReadGCPtr(pVCpu, pShadowLDT, GCPtrLdt, cbChunk);
1271 if (RT_SUCCESS(rc))
1272 {
1273 /*
1274 * Mark the page as present (accessed and dirty).
1275 */
1276 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1277 AssertRC(rc);
1278
1279 /*
1280 * Loop thru the available LDT entries.
1281 * Figuring out where to start and end, and the potential cross-pageness of
1282 * things, adds a little complexity. pLDTE is updated inside the loop and not
1283 * in the 'next' part of the loop. The pLDTEEnd is inclusive.
1284 */
1285 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1286 if (pLDTE + 1 < pShadowLDT)
1287 pLDTE = (PX86DESC)((uintptr_t)pShadowLDT + off);
1288 while (pLDTE <= pLDTEEnd)
1289 {
1290 if (pLDTE->Gen.u1Present)
1291 {
1292 /*
1293 * Code and data selectors are generally 1:1, with the
1294 * 'little' adjustment we do for DPL 0 selectors.
1295 */
1296 if (pLDTE->Gen.u1DescType)
1297 {
1298 /*
1299 * Hack for A-bit against Trap E (#PF) on read-only GDT.
1300 */
1301 /** @todo Fix this by loading ds and cs before turning off WP. */
1302 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1303 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1304
1305 /*
1306 * All DPL 0 code and data segments are squeezed into DPL 1.
1307 *
1308 * We're skipping conforming segments here because those
1309 * cannot give us any trouble.
1310 */
1311 if ( pLDTE->Gen.u2Dpl == 0
1312 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1313 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1314 pLDTE->Gen.u2Dpl = 1;
1315 }
1316 else
1317 {
1318 /*
1319 * System type selectors are marked not present.
1320 * Recompiler or special handling is required for these.
1321 */
1322 /** @todo what about interrupt gates and rawr0? */
1323 pLDTE->Gen.u1Present = 0;
1324 }
1325 }
1326
1327 /* Next LDT entry. */
1328 pLDTE++;
1329 }
1330 }
1331 else
1332 {
1333 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc));
1334 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1335 AssertRC(rc);
1336 }
1337
1338 /*
1339 * Advance to the next page.
1340 */
1341 cbLeft -= cbChunk;
1342 GCPtrShadowLDT += cbChunk;
1343 pShadowLDT = (PX86DESC)((char *)pShadowLDT + cbChunk);
1344 GCPtrLdt += cbChunk;
1345 }
1346 }
1347
1348 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1349 return VINF_SUCCESS;
1350}
1351
1352
1353/**
1354 * \#PF Handler callback for virtual access handler ranges.
1355 *
1356 * Important to realize that a physical page in a range can have aliases, and
1357 * for ALL and WRITE handlers these will also trigger.
1358 *
1359 * @returns VINF_SUCCESS if the handler has carried out the operation.
1360 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1361 * @param pVM VM Handle.
1362 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1363 * @param pvPtr The HC mapping of that address.
1364 * @param pvBuf What the guest is reading/writing.
1365 * @param cbBuf How much it's reading/writing.
1366 * @param enmAccessType The access type.
1367 * @param pvUser User argument.
1368 */
1369static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1370{
1371 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1372 Log(("selmR3GuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
1373
1374 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_GDT);
1375 return VINF_PGM_HANDLER_DO_DEFAULT;
1376}
1377
1378
1379/**
1380 * \#PF Handler callback for virtual access handler ranges.
1381 *
1382 * Important to realize that a physical page in a range can have aliases, and
1383 * for ALL and WRITE handlers these will also trigger.
1384 *
1385 * @returns VINF_SUCCESS if the handler has carried out the operation.
1386 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1387 * @param pVM VM Handle.
1388 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1389 * @param pvPtr The HC mapping of that address.
1390 * @param pvBuf What the guest is reading/writing.
1391 * @param cbBuf How much it's reading/writing.
1392 * @param enmAccessType The access type.
1393 * @param pvUser User argument.
1394 */
1395static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1396{
1397 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1398 Log(("selmR3GuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
1399 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_LDT);
1400 return VINF_PGM_HANDLER_DO_DEFAULT;
1401}
1402
1403
1404/**
1405 * \#PF Handler callback for virtual access handler ranges.
1406 *
1407 * Important to realize that a physical page in a range can have aliases, and
1408 * for ALL and WRITE handlers these will also trigger.
1409 *
1410 * @returns VINF_SUCCESS if the handler has carried out the operation.
1411 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1412 * @param pVM VM Handle.
1413 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1414 * @param pvPtr The HC mapping of that address.
1415 * @param pvBuf What the guest is reading/writing.
1416 * @param cbBuf How much it's reading/writing.
1417 * @param enmAccessType The access type.
1418 * @param pvUser User argument.
1419 */
1420static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1421{
1422 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1423 Log(("selmR3GuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
1424
1425 /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
1426 * reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
1427 * should probably also deregister the virtual handler if TR.base/size
1428 * changes while we're in REM. */
1429
1430 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_TSS);
1431
1432 return VINF_PGM_HANDLER_DO_DEFAULT;
1433}
1434
1435
1436/**
1437 * Synchronize the shadowed fields in the TSS.
1438 *
1439 * At present we're shadowing the ring-0 stack selector & pointer, and the
1440 * interrupt redirection bitmap (if present). We take the lazy approach wrt
1441 * REM; this function is called both when REM makes changes to the TSS and
1442 * when it loads TR.
1443 *
1444 * @returns VBox status code.
1445 * @param pVM The VM to operate on.
1446 * @param pVCpu The VMCPU to operate on.
1447 */
1448VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
1449{
1450 int rc;
1451
1452 if (pVM->selm.s.fDisableMonitoring)
1453 {
1454 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1455 return VINF_SUCCESS;
1456 }
1457
1458 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1459 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
1460
1461 /*
1462 * Get TR and extract and store the basic info.
1463 *
1464 * Note! The TSS limit is not checked by the LTR code, so we
1465 * have to be a bit careful with it. We make sure cbTss
1466 * won't be zero if TR is valid, and if TR is NULL we'll
1467 * make sure cbTss is 0.
1468 */
1469 CPUMSELREGHID trHid;
1470 RTSEL SelTss = CPUMGetGuestTR(pVCpu, &trHid);
1471 RTGCPTR GCPtrTss = trHid.u64Base;
1472 uint32_t cbTss = trHid.u32Limit;
1473 Assert( (SelTss & X86_SEL_MASK)
1474 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1475 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1476 if (SelTss & X86_SEL_MASK)
1477 {
1478 Assert(!(SelTss & X86_SEL_LDT));
1479 Assert(trHid.Attr.n.u1DescType == 0);
1480 Assert( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY
1481 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY);
1482 if (!++cbTss)
1483 cbTss = UINT32_MAX;
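        /* (Inclusive limit -> byte count: +1, saturating at UINT32_MAX when the limit is 0xffffffff.) */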
1484 }
1485 else
1486 {
1487 Assert( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1488 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1489 cbTss = 0; /* the reset case. */
1490 }
1491 pVM->selm.s.cbGuestTss = cbTss;
1492 pVM->selm.s.fGuestTss32Bit = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1493 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1494
1495 /*
1496 * Figure out the size of what needs to be monitored.
1497 */
1498 /* We're not interested in any 16-bit TSSes. */
1499 uint32_t cbMonitoredTss = cbTss;
1500 if ( trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1501 && trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
1502 cbMonitoredTss = 0;
1503
1504 pVM->selm.s.offGuestIoBitmap = 0;
1505 bool fNoRing1Stack = true;
1506 if (cbMonitoredTss)
1507 {
1508 /*
1509 * 32-bit TSS. What we're really keen on is the SS0 and ESP0 fields.
1510 * If VME is enabled we also want to keep an eye on the interrupt
1511 * redirection bitmap.
1512 */
1513 VBOXTSS Tss;
1514 uint32_t cr4 = CPUMGetGuestCR4(pVCpu);
1515 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, IntRedirBitmap));
1516 if ( !(cr4 & X86_CR4_VME)
1517 || ( RT_SUCCESS(rc)
1518 && Tss.offIoBitmap < sizeof(VBOXTSS) /* too small */
1519 && Tss.offIoBitmap > cbTss) /* beyond the end */ /** @todo not sure how the partial case is handled; probably not allowed. */
1520 )
1521 /* No interrupt redirection bitmap, just ESP0 and SS0. */
1522 cbMonitoredTss = RT_UOFFSETOF(VBOXTSS, padding_ss0);
1523 else if (RT_SUCCESS(rc))
1524 {
1525 /*
1526 * Everything up to and including the interrupt redirection bitmap. Unfortunately
1527 * this can be quite a large chunk. We used to skip it earlier and just hope it
1528 * was kind of static...
1529 *
1530 * Update the virtual interrupt redirection bitmap while we're here.
1531 * (It is located in the 32 bytes before TR:offIoBitmap.)
1532 */
1533 cbMonitoredTss = Tss.offIoBitmap;
1534 pVM->selm.s.offGuestIoBitmap = Tss.offIoBitmap;
1535
1536 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap);
1537 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pVM->selm.s.Tss.IntRedirBitmap,
1538 GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap));
1539 AssertRC(rc);
1540 /** @todo memset the bitmap on failure? */
1541 Log2(("Redirection bitmap:\n"));
1542 Log2(("%.*Rhxd\n", sizeof(Tss.IntRedirBitmap), &pVM->selm.s.Tss.IntRedirBitmap));
1543 }
1544 else
1545 {
1546 cbMonitoredTss = RT_OFFSETOF(VBOXTSS, IntRedirBitmap);
1547 pVM->selm.s.offGuestIoBitmap = 0;
1548 /** @todo memset the bitmap? */
1549 }
1550
1551 /*
1552 * Update the ring-0 stack selector and stack pointer.
1553 */
1554 if (RT_SUCCESS(rc))
1555 {
1556#ifdef LOG_ENABLED
1557 if (LogIsEnabled())
1558 {
1559 uint32_t ssr0, espr0;
1560 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1561 if ((ssr0 & ~1) != Tss.ss0 || espr0 != Tss.esp0)
1562 {
1563 RTGCPHYS GCPhys = NIL_RTGCPHYS;
1564 rc = PGMGstGetPage(pVCpu, GCPtrTss, NULL, &GCPhys); AssertRC(rc);
1565 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X from %04X:%08X; TSS Phys=%RGp\n",
1566 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys));
1567 AssertMsg(ssr0 != Tss.ss0,
1568 ("ring-1 leak into TSS.SS0! %04X:%08X from %04X:%08X; TSS Phys=%RGp\n",
1569 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys));
1570 }
1571 Log(("offIoBitmap=%#x\n", Tss.offIoBitmap));
1572 }
1573#endif /* LOG_ENABLED */
1574 AssertMsg(!(Tss.ss0 & 3), ("ring-1 leak into TSS.SS0? %04X:%08X\n", Tss.ss0, Tss.esp0));
1575
1576 /* Update our TSS structure for the guest's ring 1 stack */
1577 selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0);
1578 pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false;
1579 }
1580 }
1581
1582 /*
1583 * Flush the ring-1 stack and the direct syscall dispatching if we
1584 * cannot obtain SS0:ESP0.
1585 */
1586 if (fNoRing1Stack)
1587 {
1588 selmSetRing1Stack(pVM, 0 /* invalid SS */, 0);
1589 pVM->selm.s.fSyncTSSRing0Stack = cbMonitoredTss != 0;
1590
1591 /** @todo handle these dependencies better! */
1592 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER);
1593 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER);
1594 }
1595
1596 /*
1597 * Check for monitor changes and apply them.
1598 */
1599 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1600 || cbMonitoredTss != pVM->selm.s.cbMonitoredGuestTss)
1601 {
1602 Log(("SELMR3SyncTSS: Guest's TSS is changed to pTss=%RGv cbMonitoredTss=%08X cbGuestTss=%#08x\n",
1603 GCPtrTss, cbMonitoredTss, pVM->selm.s.cbGuestTss));
1604
1605 /* Release the old range first. */
1606 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
1607 {
1608 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1609 AssertRC(rc);
1610 }
1611
1612 /* Register the write handler if the TSS is monitored (cbMonitoredTss != 0). */
1613 if (cbMonitoredTss != 0)
1614 {
1615 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
1616 0, selmR3GuestTSSWriteHandler,
1617 "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1618 if (RT_FAILURE(rc))
1619 {
1620 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1621 return rc;
1622 }
1623
1624 /* Update saved Guest TSS info. */
1625 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1626 pVM->selm.s.cbMonitoredGuestTss = cbMonitoredTss;
1627 pVM->selm.s.GCSelTss = SelTss;
1628 }
1629 else
1630 {
1631 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
1632 pVM->selm.s.cbMonitoredGuestTss = 0;
1633 pVM->selm.s.GCSelTss = 0;
1634 }
1635 }
1636
1637 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1638
1639 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1640 return VINF_SUCCESS;
1641}
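
/* Hedged example (added for illustration, not part of the original file): how a
 * caller typically drives the synchronization above. The resync is gated on the
 * VMCPU_FF_SELM_SYNC_TSS force-action flag, which SELMR3SyncTSS asserts on
 * entry and clears on success; the SELMR3SyncTSS signature is assumed from the
 * function body above. */
#if 0 /* illustrative sketch, kept out of the build */
static int exampleForceTssResync(PVM pVM, PVMCPU pVCpu)
{
    /* Raise the flag the function asserts on entry... */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    /* ...then resynchronize; on success the shadow SS0/ESP0 values and the
       interrupt redirection bitmap copy are up to date again. */
    int rc = SELMR3SyncTSS(pVM, pVCpu);
    AssertRC(rc);
    return rc;
}
#endif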
1642
1643
1644/**
1645 * Compares the Guest GDT and LDT with the shadow tables.
1646 * This is a VBOX_STRICT only function.
1647 *
1648 * @returns VBox status code.
1649 * @param pVM The VM Handle.
1650 */
1651VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1652{
1653#ifdef VBOX_STRICT
1654 PVMCPU pVCpu = VMMGetCpu(pVM);
1655
1656 /*
1657 * Get GDTR and check for conflict.
1658 */
1659 VBOXGDTR GDTR;
1660 CPUMGetGuestGDTR(pVCpu, &GDTR);
1661 if (GDTR.cbGdt == 0)
1662 return VINF_SUCCESS;
1663
1664 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
1665 Log(("SELMR3DebugCheck: guest GDT size forced us to look for unused selectors.\n"));
1666
1667 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1668 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1669
1670 /*
1671 * Loop thru the GDT checking each entry.
1672 */
1673 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1674 PX86DESC pGDTE = pVM->selm.s.paGdtR3;
1675 PX86DESC pGDTEEnd = (PX86DESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1676 while (pGDTE < pGDTEEnd)
1677 {
1678 X86DESC GDTEGuest;
1679 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1680 if (RT_SUCCESS(rc))
1681 {
1682 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1683 {
1684 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1685 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1686 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1687 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1688 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1689 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1690 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1691 {
1692 unsigned iGDT = pGDTE - pVM->selm.s.paGdtR3;
1693 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1694 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1695 }
1696 }
1697 }
1698
1699 /* Advance to the next descriptor. */
1700 GCPtrGDTEGuest += sizeof(X86DESC);
1701 pGDTE++;
1702 }
1703
1704
1705 /*
1706 * LDT?
1707 */
1708 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
1709 if ((SelLdt & X86_SEL_MASK) == 0)
1710 return VINF_SUCCESS;
1711 if (SelLdt > GDTR.cbGdt)
1712 {
1713 Log(("SELMR3DebugCheck: LDT is out of bounds. SelLdt=%#x\n", SelLdt));
1714 return VERR_INTERNAL_ERROR;
1715 }
1716 X86DESC LDTDesc;
1717 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1718 if (RT_FAILURE(rc))
1719 {
1720 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1721 return rc;
1722 }
1723 RTGCPTR GCPtrLDTEGuest = X86DESC_BASE(LDTDesc);
1724 unsigned cbLdt = X86DESC_LIMIT(LDTDesc);
1725 if (LDTDesc.Gen.u1Granularity)
1726 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1727
1728 /*
1729 * Validate it.
1730 */
1731 if (!cbLdt)
1732 return VINF_SUCCESS;
1733 /** @todo check what intel does about odd limits. */
1734 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1735 if ( LDTDesc.Gen.u1DescType
1736 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1737 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1738 {
1739 Log(("SELMR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1740 return VERR_INTERNAL_ERROR;
1741 }
1742
1743 /*
1744 * Loop thru the LDT checking each entry.
1745 */
1746 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1747 PX86DESC pLDTE = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1748 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pLDTE + cbLdt);
1749 while (pLDTE < pLDTEEnd)
1750 {
1751 X86DESC LDTEGuest;
1752 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1753 if (RT_SUCCESS(rc))
1754 {
1755 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1756 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1757 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1758 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1759 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1760 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1761 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1762 {
1763 unsigned iLDT = pLDTE - (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1764 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1765 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1766 }
1767 }
1768
1769 /* Advance to the next descriptor. */
1770 GCPtrLDTEGuest += sizeof(X86DESC);
1771 pLDTE++;
1772 }
1773
1774#else /* !VBOX_STRICT */
1775 NOREF(pVM);
1776#endif /* !VBOX_STRICT */
1777
1778 return VINF_SUCCESS;
1779}
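
/* Hedged example (illustration only): fetching a single guest GDT entry by
 * selector and dumping it, using the same APIs as the strict check above.
 * The helper name is made up for this sketch. */
#if 0 /* illustrative sketch, kept out of the build */
static void exampleDumpGuestGdtEntry(PVMCPU pVCpu, RTSEL Sel)
{
    VBOXGDTR GDTR;
    CPUMGetGuestGDTR(pVCpu, &GDTR);
    if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 <= GDTR.cbGdt)
    {
        X86DESC Desc;
        int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GDTR.pGdt + (Sel & X86_SEL_MASK), sizeof(Desc));
        if (RT_SUCCESS(rc))
            SELMR3DumpDescriptor(Desc, Sel, "exampleDumpGuestGdtEntry");
        else
            Log(("exampleDumpGuestGdtEntry: read failed rc=%d\n", rc));
    }
}
#endif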
1780
1781
1782/**
1783 * Validates the RawR0 TSS values against the one in the Guest TSS.
1784 *
1785 * @returns true if it matches.
1786 * @returns false (and raises assertions) on mismatch.
1787 * @param pVM VM Handle.
1788 */
1789VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
1790{
1791#ifdef VBOX_STRICT
1792 PVMCPU pVCpu = VMMGetCpu(pVM);
1793
1794 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1795 return true;
1796
1797 /*
1798 * Get TR and extract the basic info.
1799 */
1800 CPUMSELREGHID trHid;
1801 RTSEL SelTss = CPUMGetGuestTR(pVCpu, &trHid);
1802 RTGCPTR GCPtrTss = trHid.u64Base;
1803 uint32_t cbTss = trHid.u32Limit;
1804 Assert( (SelTss & X86_SEL_MASK)
1805 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1806 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1807 if (SelTss & X86_SEL_MASK)
1808 {
1809 AssertReturn(!(SelTss & X86_SEL_LDT), false);
1810 AssertReturn(trHid.Attr.n.u1DescType == 0, false);
1811 AssertReturn( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY
1812 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY,
1813 false);
1814 if (!++cbTss)
1815 cbTss = UINT32_MAX;
1816 }
1817 else
1818 {
1819 AssertReturn( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1820 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */),
1821 false);
1822 cbTss = 0; /* the reset case. */
1823 }
1824 AssertMsgReturn(pVM->selm.s.cbGuestTss == cbTss, ("%#x %#x\n", pVM->selm.s.cbGuestTss, cbTss), false);
1825 AssertMsgReturn(pVM->selm.s.fGuestTss32Bit == ( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1826 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY),
1827 ("%RTbool u4Type=%d\n", pVM->selm.s.fGuestTss32Bit, trHid.Attr.n.u4Type),
1828 false);
1829 AssertMsgReturn( pVM->selm.s.GCSelTss == SelTss
1830 || (!pVM->selm.s.GCSelTss && !(SelTss & X86_SEL_LDT)),
1831 ("%#x %#x\n", pVM->selm.s.GCSelTss, SelTss),
1832 false);
1833 AssertMsgReturn( pVM->selm.s.GCPtrGuestTss == GCPtrTss
1834 || (pVM->selm.s.GCPtrGuestTss == RTRCPTR_MAX && !GCPtrTss),
1835 ("%#RGv %#RGv\n", pVM->selm.s.GCPtrGuestTss, GCPtrTss),
1836 false);
1837
1838
1839 /*
1840 * Figure out the size of what needs to be monitored.
1841 */
1842 bool fNoRing1Stack = true;
1843 /* We're not interested in any 16-bit TSSes. */
1844 uint32_t cbMonitoredTss = cbTss;
1845 if ( trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1846 && trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
1847 cbMonitoredTss = 0;
1848 if (cbMonitoredTss)
1849 {
1850 VBOXTSS Tss;
1851 uint32_t cr4 = CPUMGetGuestCR4(pVCpu);
1852 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, IntRedirBitmap));
1853 AssertReturn( rc == VINF_SUCCESS
1854 /* Happens early in XP boot during page table switching. */
1855 || ( (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1856 && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF)),
1857 false);
1858 if ( !(cr4 & X86_CR4_VME)
1859 || ( RT_SUCCESS(rc)
1860 && Tss.offIoBitmap < sizeof(VBOXTSS) /* too small */
1861 && Tss.offIoBitmap > cbTss)
1862 )
1863 cbMonitoredTss = RT_UOFFSETOF(VBOXTSS, padding_ss0);
1864 else if (RT_SUCCESS(rc))
1865 {
1866 cbMonitoredTss = Tss.offIoBitmap;
1867 AssertMsgReturn(pVM->selm.s.offGuestIoBitmap == Tss.offIoBitmap,
1868 ("#x %#x\n", pVM->selm.s.offGuestIoBitmap, Tss.offIoBitmap),
1869 false);
1870
1871 /* check the bitmap */
1872 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap);
1873 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss.IntRedirBitmap,
1874 GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap));
1875 AssertRCReturn(rc, false);
1876 AssertMsgReturn(!memcmp(&Tss.IntRedirBitmap[0], &pVM->selm.s.Tss.IntRedirBitmap[0], sizeof(Tss.IntRedirBitmap)),
1877 ("offIoBitmap=%#x cbTss=%#x\n"
1878 " Guest: %.32Rhxs\n"
1879 "Shadow: %.32Rhxs\n",
1880 Tss.offIoBitmap, cbTss,
1881 &Tss.IntRedirBitmap[0],
1882 &pVM->selm.s.Tss.IntRedirBitmap[0]),
1883 false);
1884 }
1885 else
1886 cbMonitoredTss = RT_OFFSETOF(VBOXTSS, IntRedirBitmap);
1887
1888 /*
1889 * Check SS0 and ESP0.
1890 */
1891 if ( !pVM->selm.s.fSyncTSSRing0Stack
1892 && RT_SUCCESS(rc))
1893 {
1894 if ( Tss.esp0 != pVM->selm.s.Tss.esp1
1895 || Tss.ss0 != (pVM->selm.s.Tss.ss1 & ~1))
1896 {
1897 RTGCPHYS GCPhys;
1898 rc = PGMGstGetPage(pVCpu, GCPtrTss, NULL, &GCPhys); AssertRC(rc);
1899 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%RGv Phys=%RGp\n",
1900 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1,
1901 Tss.ss0, Tss.esp0, GCPtrTss, GCPhys));
1902 return false;
1903 }
1904 }
1905 AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
1906 }
1907 else
1908 {
1909 AssertMsgReturn(pVM->selm.s.Tss.ss1 == 0 && pVM->selm.s.Tss.esp1 == 0, ("%04x:%08x\n", pVM->selm.s.Tss.ss1, pVM->selm.s.Tss.esp1), false);
1910 AssertReturn(!pVM->selm.s.fSyncTSSRing0Stack, false);
1911 AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
1912 }
1913
1914
1915
1916 return true;
1917
1918#else /* !VBOX_STRICT */
1919 NOREF(pVM);
1920 return true;
1921#endif /* !VBOX_STRICT */
1922}
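
/* Hedged usage note (illustration only): since the validator above compiles to
 * a constant true outside VBOX_STRICT, it is natural to invoke it from an
 * assertion so release builds pay nothing: */
#if 0 /* illustrative sketch, kept out of the build */
    Assert(SELMR3CheckTSS(pVM));    /* true on match; asserts and returns false on mismatch */
#endif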
1923
1924
1925/**
1926 * Returns the flat address and limit of the LDT, looking the LDT selector
1927 * up in the guest GDT.
1928 * Fully validates the selector.
1929 *
1930 * @returns VBox status code.
1931 * @param pVM VM Handle.
1932 * @param SelLdt LDT selector.
1933 * @param ppvLdt Where to store the flat address of LDT.
1934 * @param pcbLimit Where to store LDT limit.
1935 */
1936VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1937{
1938 PVMCPU pVCpu = VMMGetCpu(pVM);
1939
1940 /* Get guest GDTR. */
1941 VBOXGDTR GDTR;
1942 CPUMGetGuestGDTR(pVCpu, &GDTR);
1943
1944 /* Check selector TI and GDT limit. */
1945 if ( SelLdt & X86_SEL_LDT
1946 || (SelLdt > GDTR.cbGdt))
1947 return VERR_INVALID_SELECTOR;
1948
1949 /* Read descriptor from GC. */
1950 X86DESC Desc;
1951 int rc = PGMPhysSimpleReadGCPtr(pVCpu, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1952 if (RT_FAILURE(rc))
1953 {
1954 /* fatal */
1955 AssertMsgFailed(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1956 return VERR_SELECTOR_NOT_PRESENT;
1957 }
1958
1959 /* Check that the LDT descriptor is present. */
1960 if (Desc.Gen.u1Present == 0)
1961 return VERR_SELECTOR_NOT_PRESENT;
1962
1963 /* Check LDT descriptor type. */
1964 if ( Desc.Gen.u1DescType == 1
1965 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1966 return VERR_INVALID_SELECTOR;
1967
1968 /* LDT descriptor is ok. */
1969 if (ppvLdt)
1970 {
1971 *ppvLdt = (RTGCPTR)X86DESC_BASE(Desc);
1972 *pcbLimit = X86DESC_LIMIT(Desc);
1973 }
1974 return VINF_SUCCESS;
1975}
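
/* Hedged example (illustration only): resolving the current guest LDT with the
 * function above. The helper name is made up for this sketch. */
#if 0 /* illustrative sketch, kept out of the build */
static void exampleLogGuestLdt(PVM pVM, PVMCPU pVCpu)
{
    RTSEL    SelLdt   = CPUMGetGuestLDTR(pVCpu);
    RTGCPTR  GCPtrLdt = 0;
    unsigned cbLimit  = 0;
    int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLimit);
    if (RT_SUCCESS(rc))
        Log(("Guest LDT: Sel=%04x base=%RGv limit=%#x\n", SelLdt, GCPtrLdt, cbLimit));
    else
        Log(("Guest LDT: Sel=%04x rc=%d\n", SelLdt, rc));
}
#endif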
1976
1977
1978/**
1979 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
1980 *
1981 * See SELMR3GetSelectorInfo for details.
1982 *
1983 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
1984 *
1985 * @param pVM VM handle.
1986 * @param pVCpu VMCPU handle.
1987 * @param Sel The selector to get info about.
1988 * @param pSelInfo Where to store the information.
1989 */
1990static int selmR3GetSelectorInfo64(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
1991{
1992 /*
1993 * Read it from the guest descriptor table.
1994 */
1995 X86DESC64 Desc;
1996 VBOXGDTR Gdtr;
1997 RTGCPTR GCPtrDesc;
1998 CPUMGetGuestGDTR(pVCpu, &Gdtr);
1999 if (!(Sel & X86_SEL_LDT))
2000 {
2001 /* GDT */
2002 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2003 return VERR_INVALID_SELECTOR;
2004 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
2005 }
2006 else
2007 {
2008 /*
2009 * LDT - must locate the LDT first.
2010 */
2011 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2012 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */
2013 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2014 return VERR_INVALID_SELECTOR;
2015 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
2016 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2017 if (RT_FAILURE(rc))
2018 return rc;
2019
2020 /* validate the LDT descriptor. */
2021 if (Desc.Gen.u1Present == 0)
2022 return VERR_SELECTOR_NOT_PRESENT;
2023 if ( Desc.Gen.u1DescType == 1
2024 || Desc.Gen.u4Type != AMD64_SEL_TYPE_SYS_LDT)
2025 return VERR_INVALID_SELECTOR;
2026
2027 uint32_t cbLimit = X86DESC_LIMIT(Desc);
2028 if (Desc.Gen.u1Granularity)
2029 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2030 if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
2031 return VERR_INVALID_SELECTOR;
2032
2033 /* calc the descriptor location. */
2034 GCPtrDesc = X86DESC64_BASE(Desc);
2035 GCPtrDesc += (Sel & X86_SEL_MASK);
2036 }
2037
2038 /* read the descriptor. */
2039 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2040 if (RT_FAILURE(rc))
2041 {
2042 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(X86DESC));
2043 if (RT_FAILURE(rc))
2044 return rc;
2045 Desc.au64[1] = 0;
2046 }
2047
2048 /*
2049 * Extract the base and limit
2050 * (We ignore the present bit here, which is probably a bit silly...)
2051 */
2052 pSelInfo->Sel = Sel;
2053 pSelInfo->fFlags = DBGFSELINFO_FLAGS_LONG_MODE;
2054 pSelInfo->u.Raw64 = Desc;
2055 if (Desc.Gen.u1DescType)
2056 {
2057 if ( Desc.Gen.u1Long
2058 && Desc.Gen.u1DefBig
2059 && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
2060 {
2061 /* 64-bit code selectors are wide open. It's not possible to
2062 detect 64-bit data or stack selectors without also dragging
2063 in assumptions about the current CS. So, the selinfo user unfortunately
2064 needs to deal with this in the context where the info is used.
2065 Note: we ignore the segment limit hacks that were added by AMD. */
2066 pSelInfo->GCPtrBase = 0;
2067 pSelInfo->cbLimit = ~(RTGCUINTPTR)0;
2068 }
2069 else
2070 {
2071 pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
2072 if (Desc.Gen.u1Granularity)
2073 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2074 pSelInfo->GCPtrBase = X86DESC_BASE(Desc);
2075 }
2076 pSelInfo->SelGate = 0;
2077 }
2078 else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_LDT
2079 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_AVAIL
2080 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY)
2081 {
2082 /* Note: LDT descriptors are weird in long mode; we ignore the footnote
2083 in the AMD manual here as a simplification. */
2084 pSelInfo->GCPtrBase = X86DESC64_BASE(Desc);
2085 pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
2086 if (Desc.Gen.u1Granularity)
2087 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2088 pSelInfo->SelGate = 0;
2089 }
2090 else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE
2091 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TRAP_GATE
2092 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_INT_GATE)
2093 {
2094 pSelInfo->cbLimit = X86DESC64_BASE(Desc);
2095 pSelInfo->GCPtrBase = Desc.Gate.u16OffsetLow
2096 | ((uint32_t)Desc.Gate.u16OffsetHigh << 16)
2097 | ((uint64_t)Desc.Gate.u32OffsetTop << 32);
2098 pSelInfo->SelGate = Desc.Gate.u16Sel;
2099 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
2100 }
2101 else
2102 {
2103 pSelInfo->cbLimit = 0;
2104 pSelInfo->GCPtrBase = 0;
2105 pSelInfo->SelGate = 0;
2106 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
2107 }
2108 if (!Desc.Gen.u1Present)
2109 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
2110
2111 return VINF_SUCCESS;
2112}
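
/* Worked example (illustration only): a long-mode gate target is assembled
 * from three offset fields, exactly as done above. With u16OffsetLow=0x5678,
 * u16OffsetHigh=0x1234 and u32OffsetTop=0xffffffff the target address is
 * 0xffffffff12345678. */
#if 0 /* illustrative sketch, kept out of the build */
    uint64_t uGateTarget = Desc.Gate.u16OffsetLow
                         | ((uint32_t)Desc.Gate.u16OffsetHigh << 16)
                         | ((uint64_t)Desc.Gate.u32OffsetTop  << 32);
#endif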
2113
2114
2115/**
2116 * Worker for selmR3GetSelectorInfo32 and SELMR3GetShadowSelectorInfo that
2117 * interprets a legacy descriptor table entry and fills in the selector info
2118 * structure from it.
2119 *
2120 * @param pSelInfo Where to store the selector info. Only the fFlags and
2121 * Sel members have been initialized.
2122 * @param pDesc The legacy descriptor to parse.
2123 */
2124DECLINLINE(void) selmR3SelInfoFromDesc32(PDBGFSELINFO pSelInfo, PCX86DESC pDesc)
2125{
2126 pSelInfo->u.Raw64.au64[1] = 0;
2127 pSelInfo->u.Raw = *pDesc;
2128 if ( pDesc->Gen.u1DescType
2129 || !(pDesc->Gen.u4Type & 4))
2130 {
2131 pSelInfo->cbLimit = X86DESC_LIMIT(*pDesc);
2132 if (pDesc->Gen.u1Granularity)
2133 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2134 pSelInfo->GCPtrBase = X86DESC_BASE(*pDesc);
2135 pSelInfo->SelGate = 0;
2136 }
2137 else if (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_UNDEFINED4)
2138 {
2139 pSelInfo->cbLimit = 0;
2140 if (pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_TASK_GATE)
2141 pSelInfo->GCPtrBase = 0;
2142 else
2143 pSelInfo->GCPtrBase = pDesc->Gate.u16OffsetLow
2144 | (uint32_t)pDesc->Gate.u16OffsetHigh << 16;
2145 pSelInfo->SelGate = pDesc->Gate.u16Sel;
2146 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
2147 }
2148 else
2149 {
2150 pSelInfo->cbLimit = 0;
2151 pSelInfo->GCPtrBase = 0;
2152 pSelInfo->SelGate = 0;
2153 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
2154 }
2155 if (!pDesc->Gen.u1Present)
2156 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
2157}
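
/* Worked example (illustration only): how the granularity bit scales a limit,
 * matching the formula used above. A raw 20-bit limit of 0xfffff with G=1
 * expands to (0xfffff << PAGE_SHIFT) | PAGE_OFFSET_MASK = 0xffffffff, i.e. a
 * flat 4 GiB segment. */
#if 0 /* illustrative sketch, kept out of the build */
    uint32_t cbLimit = X86DESC_LIMIT(Desc);     /* raw 20-bit limit */
    if (Desc.Gen.u1Granularity)                 /* limit counted in 4 KiB pages */
        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
#endif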
2158
2159
2160/**
2161 * Gets information about a 32-bit (legacy) selector, SELMR3GetSelectorInfo helper.
2162 *
2163 * See SELMR3GetSelectorInfo for details.
2164 *
2165 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
2166 *
2167 * @param pVM VM handle.
2168 * @param pVCpu VMCPU handle.
2169 * @param Sel The selector to get info about.
2170 * @param pSelInfo Where to store the information.
2171 */
2172static int selmR3GetSelectorInfo32(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
2173{
2174 /*
2175 * Read the descriptor entry
2176 */
2177 pSelInfo->fFlags = 0;
2178 X86DESC Desc;
2179 if ( !(Sel & X86_SEL_LDT)
2180 && ( pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
2181 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
2182 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
2183 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
2184 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
2185 )
2186 {
2187 /*
2188 * Hypervisor descriptor.
2189 */
2190 pSelInfo->fFlags = DBGFSELINFO_FLAGS_HYPER;
2191 if (CPUMIsGuestInProtectedMode(pVCpu))
2192 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_PROT_MODE;
2193 else
2194 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_REAL_MODE;
2195
2196 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
2197 }
2198 else if (CPUMIsGuestInProtectedMode(pVCpu))
2199 {
2200 /*
2201 * Read it from the guest descriptor table.
2202 */
2203 pSelInfo->fFlags = DBGFSELINFO_FLAGS_PROT_MODE;
2204
2205 VBOXGDTR Gdtr;
2206 RTGCPTR GCPtrDesc;
2207 CPUMGetGuestGDTR(pVCpu, &Gdtr);
2208 if (!(Sel & X86_SEL_LDT))
2209 {
2210 /* GDT */
2211 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2212 return VERR_INVALID_SELECTOR;
2213 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
2214 }
2215 else
2216 {
2217 /*
2218 * LDT - must locate the LDT first...
2219 */
2220 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2221 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */
2222 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2223 return VERR_INVALID_SELECTOR;
2224 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
2225 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2226 if (RT_FAILURE(rc))
2227 return rc;
2228
2229 /* validate the LDT descriptor. */
2230 if (Desc.Gen.u1Present == 0)
2231 return VERR_SELECTOR_NOT_PRESENT;
2232 if ( Desc.Gen.u1DescType == 1
2233 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2234 return VERR_INVALID_SELECTOR;
2235
2236 unsigned cbLimit = X86DESC_LIMIT(Desc);
2237 if (Desc.Gen.u1Granularity)
2238 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2239 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
2240 return VERR_INVALID_SELECTOR;
2241
2242 /* calc the descriptor location. */
2243 GCPtrDesc = X86DESC_BASE(Desc);
2244 GCPtrDesc += (Sel & X86_SEL_MASK);
2245 }
2246
2247 /* read the descriptor. */
2248 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2249 if (RT_FAILURE(rc))
2250 return rc;
2251 }
2252 else
2253 {
2254 /*
2255 * We're in real mode.
2256 */
2257 pSelInfo->Sel = Sel;
2258 pSelInfo->GCPtrBase = Sel << 4;
2259 pSelInfo->cbLimit = 0xffff;
2260 pSelInfo->fFlags = DBGFSELINFO_FLAGS_REAL_MODE;
2261 pSelInfo->u.Raw64.au64[0] = 0;
2262 pSelInfo->u.Raw64.au64[1] = 0;
2263 pSelInfo->SelGate = 0;
2264 return VINF_SUCCESS;
2265 }
2266
2267 /*
2268 * Extract the base and limit or sel:offset for gates.
2269 */
2270 pSelInfo->Sel = Sel;
2271 selmR3SelInfoFromDesc32(pSelInfo, &Desc);
2272
2273 return VINF_SUCCESS;
2274}
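
/* Worked example (illustration only): the real-mode branch above computes the
 * base as Sel << 4. For Sel=0xb800 that yields base 0xb8000 (the classic VGA
 * text buffer segment), always with the fixed 0xffff limit. */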
2275
2276
2277/**
2278 * Gets information about a selector.
2279 *
2280 * Intended for the debugger mostly and will prefer the guest descriptor tables
2281 * over the shadow ones.
2282 *
2283 * @retval VINF_SUCCESS on success.
2284 * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
2285 * descriptor table.
2286 * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
2287 * is not returned if the selector itself isn't present, you have to
2288 * check that for yourself (see DBGFSELINFO::fFlags).
2289 * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
2290 * pagetable or page backing the selector table wasn't present.
2291 * @returns Other VBox status code on other errors.
2292 *
2293 * @param pVM VM handle.
2294 * @param pVCpu The virtual CPU handle.
2295 * @param Sel The selector to get info about.
2296 * @param pSelInfo Where to store the information.
2297 */
2298VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
2299{
2300 AssertPtr(pSelInfo);
2301 if (CPUMIsGuestInLongMode(pVCpu))
2302 return selmR3GetSelectorInfo64(pVM, pVCpu, Sel, pSelInfo);
2303 return selmR3GetSelectorInfo32(pVM, pVCpu, Sel, pSelInfo);
2304}
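
/* Hedged example (illustration only): consuming the DBGFSELINFO produced
 * above, distinguishing gates from ordinary segments via fFlags. The helper
 * name is made up for this sketch. */
#if 0 /* illustrative sketch, kept out of the build */
static void exampleResolveSelector(PVM pVM, PVMCPU pVCpu, RTSEL Sel)
{
    DBGFSELINFO SelInfo;
    int rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, &SelInfo);
    if (RT_FAILURE(rc))
        return;
    if (SelInfo.fFlags & DBGFSELINFO_FLAGS_GATE)
        Log(("%04x is a gate -> %04x:%RGv\n", Sel, SelInfo.SelGate, SelInfo.GCPtrBase));
    else if (!(SelInfo.fFlags & DBGFSELINFO_FLAGS_INVALID))
        Log(("%04x base=%RGv limit=%RGv%s\n", Sel, SelInfo.GCPtrBase, (RTGCPTR)SelInfo.cbLimit,
             SelInfo.fFlags & DBGFSELINFO_FLAGS_NOT_PRESENT ? " (not present)" : ""));
}
#endif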
2305
2306
2307/**
2308 * Gets information about a selector from the shadow tables.
2309 *
2310 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but
2311 * requires that the caller ensures that the shadow tables are up to date.
2312 *
2313 * @retval VINF_SUCCESS on success.
2314 * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
2315 * descriptor table.
2316 * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
2317 * is not returned if the selector itself isn't present, you have to
2318 * check that for yourself (see DBGFSELINFO::fFlags).
2319 * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
2320 * pagetable or page backing the selector table wasn't present.
2321 * @returns Other VBox status code on other errors.
2322 *
2323 * @param pVM VM handle.
2324 * @param Sel The selector to get info about.
2325 * @param pSelInfo Where to store the information.
2326 *
2327 * @remarks Don't use this when in hardware assisted virtualization mode.
2328 */
2329VMMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo)
2330{
2331 Assert(pSelInfo);
2332
2333 /*
2334 * Read the descriptor entry
2335 */
2336 X86DESC Desc;
2337 if (!(Sel & X86_SEL_LDT))
2338 {
2339 /*
2340 * Global descriptor.
2341 */
2342 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
2343 pSelInfo->fFlags = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
2344 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
2345 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
2346 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
2347 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK)
2348 ? DBGFSELINFO_FLAGS_HYPER
2349 : 0;
2350 /** @todo check that the GDT offset is valid. */
2351 }
2352 else
2353 {
2354 /*
2355 * Local Descriptor.
2356 */
2357 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper);
2358 Desc = paLDT[Sel >> X86_SEL_SHIFT];
2359 /** @todo check if the LDT page is actually available. */
2360 /** @todo check that the LDT offset is valid. */
2361 pSelInfo->fFlags = 0;
2362 }
2363 if (CPUMIsGuestInProtectedMode(VMMGetCpu0(pVM)))
2364 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_PROT_MODE;
2365 else
2366 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_REAL_MODE;
2367
2368 /*
2369 * Extract the base and limit or sel:offset for gates.
2370 */
2371 pSelInfo->Sel = Sel;
2372 selmR3SelInfoFromDesc32(pSelInfo, &Desc);
2373
2374 return VINF_SUCCESS;
2375}
2376
2377
2378/**
2379 * Formats a descriptor.
2380 *
2381 * @param Desc Descriptor to format.
2382 * @param Sel Selector number.
2383 * @param pszOutput Output buffer.
2384 * @param cchOutput Size of output buffer.
2385 */
2386static void selmR3FormatDescriptor(X86DESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
2387{
2388 /*
2389 * Make variable description string.
2390 */
2391 static struct
2392 {
2393 unsigned cch;
2394 const char *psz;
2395 } const aTypes[32] =
2396 {
2397#define STRENTRY(str) { sizeof(str) - 1, str }
2398 /* system */
2399 STRENTRY("Reserved0 "), /* 0x00 */
2400 STRENTRY("TSS16Avail "), /* 0x01 */
2401 STRENTRY("LDT "), /* 0x02 */
2402 STRENTRY("TSS16Busy "), /* 0x03 */
2403 STRENTRY("Call16 "), /* 0x04 */
2404 STRENTRY("Task "), /* 0x05 */
2405 STRENTRY("Int16 "), /* 0x06 */
2406 STRENTRY("Trap16 "), /* 0x07 */
2407 STRENTRY("Reserved8 "), /* 0x08 */
2408 STRENTRY("TSS32Avail "), /* 0x09 */
2409 STRENTRY("ReservedA "), /* 0x0a */
2410 STRENTRY("TSS32Busy "), /* 0x0b */
2411 STRENTRY("Call32 "), /* 0x0c */
2412 STRENTRY("ReservedD "), /* 0x0d */
2413 STRENTRY("Int32 "), /* 0x0e */
2414 STRENTRY("Trap32 "), /* 0x0f */
2415 /* non system */
2416 STRENTRY("DataRO "), /* 0x10 */
2417 STRENTRY("DataRO Accessed "), /* 0x11 */
2418 STRENTRY("DataRW "), /* 0x12 */
2419 STRENTRY("DataRW Accessed "), /* 0x13 */
2420 STRENTRY("DataDownRO "), /* 0x14 */
2421 STRENTRY("DataDownRO Accessed "), /* 0x15 */
2422 STRENTRY("DataDownRW "), /* 0x16 */
2423 STRENTRY("DataDownRW Accessed "), /* 0x17 */
2424 STRENTRY("CodeEO "), /* 0x18 */
2425 STRENTRY("CodeEO Accessed "), /* 0x19 */
2426 STRENTRY("CodeER "), /* 0x1a */
2427 STRENTRY("CodeER Accessed "), /* 0x1b */
2428 STRENTRY("CodeConfEO "), /* 0x1c */
2429 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2430 STRENTRY("CodeConfER "), /* 0x1e */
2431 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2432#undef STRENTRY
2433 };
2434#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2435 char szMsg[128];
2436 char *psz = &szMsg[0];
2437 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2438 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2439 psz += aTypes[i].cch;
2440
2441 if (Desc.Gen.u1Present)
2442 ADD_STR(psz, "Present ");
2443 else
2444 ADD_STR(psz, "Not-Present ");
2445 if (Desc.Gen.u1Granularity)
2446 ADD_STR(psz, "Page ");
2447 if (Desc.Gen.u1DefBig)
2448 ADD_STR(psz, "32-bit ");
2449 else
2450 ADD_STR(psz, "16-bit ");
2451#undef ADD_STR
2452 *psz = '\0';
2453
2454 /*
2455 * Compute the limit and base, then format the output.
2456 */
2457 uint32_t u32Limit = X86DESC_LIMIT(Desc);
2458 if (Desc.Gen.u1Granularity)
2459 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2460 uint32_t u32Base = X86DESC_BASE(Desc);
2461
2462 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2463 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2464}
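
/* Illustrative output (derived from the RTStrPrintf format string above): for
 * a flat ring-0 32-bit code segment at selector 0x0008 the formatted line
 * would read something like
 *   0008 - 0000ffff 00cf9b00 - base=00000000 limit=ffffffff dpl=0 CodeER Accessed Present Page 32-bit
 * (descriptor words are illustrative, not taken from a real dump). */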
2465
2466
2467/**
2468 * Dumps a descriptor.
2469 *
2470 * @param Desc Descriptor to dump.
2471 * @param Sel Selector number.
2472 * @param pszMsg Message to prepend the log entry with.
2473 */
2474VMMR3DECL(void) SELMR3DumpDescriptor(X86DESC Desc, RTSEL Sel, const char *pszMsg)
2475{
2476 char szOutput[128];
2477 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2478 Log(("%s: %s\n", pszMsg, szOutput));
2479 NOREF(szOutput[0]);
2480}
2481
2482
2483/**
2484 * Display the shadow GDT.
2485 *
2486 * @param pVM VM Handle.
2487 * @param pHlp The info helpers.
2488 * @param pszArgs Arguments, ignored.
2489 */
2490static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2491{
2492 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%RRv):\n", MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3));
2493 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2494 {
2495 if (pVM->selm.s.paGdtR3[iGDT].Gen.u1Present)
2496 {
2497 char szOutput[128];
2498 selmR3FormatDescriptor(pVM->selm.s.paGdtR3[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2499 const char *psz = "";
2500 if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> X86_SEL_SHIFT))
2501 psz = " HyperCS";
2502 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> X86_SEL_SHIFT))
2503 psz = " HyperDS";
2504 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> X86_SEL_SHIFT))
2505 psz = " HyperCS64";
2506 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> X86_SEL_SHIFT))
2507 psz = " HyperTSS";
2508 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
2509 psz = " HyperTSSTrap08";
2510 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2511 }
2512 }
2513}
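
/* Hedged sketch (illustration only): info handlers like the one above are
 * registered with DBGF during initialization, roughly as below; the exact
 * registration site and description strings are assumptions here. */
#if 0 /* illustrative sketch, kept out of the build */
    int rc = DBGFR3InfoRegisterInternal(pVM, "gdt",      "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
    AssertRCReturn(rc, rc);
    rc     = DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.",  &selmR3InfoGdtGuest);
    AssertRCReturn(rc, rc);
#endif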
2514
2515
2516/**
2517 * Display the guest GDT.
2518 *
2519 * @param pVM VM Handle.
2520 * @param pHlp The info helpers.
2521 * @param pszArgs Arguments, ignored.
2522 */
2523static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2524{
2525 /** @todo SMP support! */
2526 PVMCPU pVCpu = &pVM->aCpus[0];
2527
2528 VBOXGDTR GDTR;
2529 CPUMGetGuestGDTR(pVCpu, &GDTR);
2530 RTGCPTR GCPtrGDT = GDTR.pGdt;
2531 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(X86DESC);
2532
2533 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%RGv limit=%x):\n", GCPtrGDT, GDTR.cbGdt);
2534 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, GCPtrGDT += sizeof(X86DESC))
2535 {
2536 X86DESC GDTE;
2537 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &GDTE, GCPtrGDT, sizeof(GDTE));
2538 if (RT_SUCCESS(rc))
2539 {
2540 if (GDTE.Gen.u1Present)
2541 {
2542 char szOutput[128];
2543 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2544 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2545 }
2546 }
2547 else if (rc == VERR_PAGE_NOT_PRESENT)
2548 {
2549 if ((GCPtrGDT & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
2550 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", iGDT << X86_SEL_SHIFT, GCPtrGDT);
2551 }
2552 else
2553 pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", iGDT << X86_SEL_SHIFT, rc, GCPtrGDT);
2554 }
2555}
2556
2557
2558/**
2559 * Display the shadow LDT.
2560 *
2561 * @param pVM VM Handle.
2562 * @param pHlp The info helpers.
2563 * @param pszArgs Arguments, ignored.
2564 */
2565static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2566{
2567 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2568 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper);
2569 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%RRv limit=%#x):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2570 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2571 {
2572 if (paLDT[iLDT].Gen.u1Present)
2573 {
2574 char szOutput[128];
2575 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2576 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2577 }
2578 }
2579}
2580
2581
2582/**
2583 * Display the guest LDT.
2584 *
2585 * @param pVM VM Handle.
2586 * @param pHlp The info helpers.
2587 * @param pszArgs Arguments, ignored.
2588 */
2589static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2590{
2591 /** @todo SMP support! */
2592 PVMCPU pVCpu = &pVM->aCpus[0];
2593
2594 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2595 if (!(SelLdt & X86_SEL_MASK))
2596 {
2597 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2598 return;
2599 }
2600
2601 RTGCPTR GCPtrLdt;
2602 unsigned cbLdt;
2603 int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLdt);
2604 if (RT_FAILURE(rc))
2605 {
2606 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Rrc\n", SelLdt, rc);
2607 return;
2608 }
2609
2610 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RGv limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
2611 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2612 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
2613 {
2614 X86DESC LdtE;
2615 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
2616 if (RT_SUCCESS(rc))
2617 {
2618 if (LdtE.Gen.u1Present)
2619 {
2620 char szOutput[128];
2621 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2622 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2623 }
2624 }
2625 else if (rc == VERR_PAGE_NOT_PRESENT)
2626 {
2627 if ((GCPtrLdt & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
2628 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, GCPtrLdt);
2629 }
2630 else
2631 pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, GCPtrLdt);
2632 }
2633}
2634
2635
2636/**
2637 * Dumps the hypervisor GDT
2638 *
2639 * @param pVM VM handle.
2640 */
2641VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
2642{
2643 DBGFR3Info(pVM, "gdt", NULL, NULL);
2644}
2645
2646
2647/**
2648 * Dumps the hypervisor LDT
2649 *
2650 * @param pVM VM handle.
2651 */
2652VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
2653{
2654 DBGFR3Info(pVM, "ldt", NULL, NULL);
2655}
2656
2657
2658/**
2659 * Dumps the guest GDT
2660 *
2661 * @param pVM VM handle.
2662 */
2663VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
2664{
2665 DBGFR3Info(pVM, "gdtguest", NULL, NULL);
2666}
2667
2668
2669/**
2670 * Dumps the guest LDT
2671 *
2672 * @param pVM VM handle.
2673 */
2674VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
2675{
2676 DBGFR3Info(pVM, "ldtguest", NULL, NULL);
2677}
2678