VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/SELM.cpp@37423

Last change on this file since 37423 was 35346, checked in by vboxsync, 14 years ago

VMM reorg: Moving the public include files from include/VBox to include/VBox/vmm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 101.6 KB
1/* $Id: SELM.cpp 35346 2010-12-27 16:13:13Z vboxsync $ */
2/** @file
3 * SELM - The Selector Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_selm SELM - The Selector Manager
19 *
20 * SELM takes care of GDT, LDT and TSS shadowing in raw-mode, and the injection
21 * of a few hyper selectors for the raw-mode context. In the hardware-assisted
22 * virtualization mode its only task is to decode entries in the guest GDT or
23 * LDT once in a while.
24 *
25 * @see grp_selm
26 *
27 *
28 * @section seg_selm_shadowing Shadowing
29 *
30 * SELMR3UpdateFromCPUM() and SELMR3SyncTSS() do the bulk of the
31 * synchronization work. The three structures (GDT, LDT, TSS) are all shadowed
32 * wholesale atm. The idea is to do it in a more on-demand fashion when we get
33 * time. There is also a whole bunch of issues with the current synchronization
34 * of all three tables, see notes and todos in the code.
35 *
36 * When the guest makes changes to the GDT we will try to update the shadow
37 * copy without involving SELMR3UpdateFromCPUM(), see selmGCSyncGDTEntry().
38 *
39 * When the guest makes LDT changes we'll trigger a full resync of the LDT
40 * (SELMR3UpdateFromCPUM()), which, needless to say, isn't optimal.
41 *
42 * The TSS shadowing is limited to the fields we need to care about, namely SS0
43 * and ESP0. The Patch Manager makes use of these. We monitor updates to the
44 * guest TSS and will try to keep our SS0 and ESP0 copies up to date this way
45 * rather than go the SELMR3SyncTSS() route.
46 *
47 * When in raw-mode SELM also injects a few extra GDT selectors which are used
48 * by the raw-mode (hyper) context. These start their life at the high end of
49 * the table and will be relocated when the guest tries to make use of them...
50 * Well, that was the idea at least; only the code isn't quite there yet, which
51 * is why we have trouble with guests that actually have a full-sized GDT.
52 *
53 * So, the summary of the current GDT, LDT and TSS shadowing is that there is a
54 * lot of relatively simple and enjoyable work to be done, see @bugref{3267}.
55 *
56 */
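/* Editor's note: a minimal sketch of the selector arithmetic used throughout
 * this file, assuming the usual x86 selector layout (RPL in bits 0-1, the TI
 * bit in bit 2, and the descriptor index from bit 3 up). The helper name is
 * hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
static unsigned selmExampleSelToGdtIndex(RTSEL Sel)
{
    /* Strip the RPL and TI bits; what remains is the descriptor table index.
       This is why the code below keeps converting with >> 3 and << 3. */
    return Sel >> 3;
}
#endif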
57
58/*******************************************************************************
59* Header Files *
60*******************************************************************************/
61#define LOG_GROUP LOG_GROUP_SELM
62#include <VBox/vmm/selm.h>
63#include <VBox/vmm/cpum.h>
64#include <VBox/vmm/stam.h>
65#include <VBox/vmm/mm.h>
66#include <VBox/vmm/ssm.h>
67#include <VBox/vmm/pgm.h>
68#include <VBox/vmm/trpm.h>
69#include <VBox/vmm/dbgf.h>
70#include "SELMInternal.h"
71#include <VBox/vmm/vm.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74
75#include <iprt/assert.h>
76#include <VBox/log.h>
77#include <iprt/asm.h>
78#include <iprt/string.h>
79#include <iprt/thread.h>
81
82
83/**
84 * Enable or disable tracking of Guest's GDT/LDT/TSS.
85 * @{
86 */
87#define SELM_TRACK_GUEST_GDT_CHANGES
88#define SELM_TRACK_GUEST_LDT_CHANGES
89#define SELM_TRACK_GUEST_TSS_CHANGES
90/** @} */
91
92/**
93 * Enable or disable tracking of Shadow GDT/LDT/TSS.
94 * @{
95 */
96#define SELM_TRACK_SHADOW_GDT_CHANGES
97#define SELM_TRACK_SHADOW_LDT_CHANGES
98#define SELM_TRACK_SHADOW_TSS_CHANGES
99/** @} */
100
101
102/** SELM saved state version. */
103#define SELM_SAVED_STATE_VERSION 5
104
105
106/*******************************************************************************
107* Internal Functions *
108*******************************************************************************/
109static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
110static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
111static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
112static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
113static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
114static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
115static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
116static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
117static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
118static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
119//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
120//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
121
122
123
124/**
125 * Initializes the SELM.
126 *
127 * @returns VBox status code.
128 * @param pVM The VM to operate on.
129 */
130VMMR3DECL(int) SELMR3Init(PVM pVM)
131{
132 LogFlow(("SELMR3Init\n"));
133
134 /*
135 * Assert alignment and sizes.
136 * (The TSS block requires contiguous backing.)
137 */
138 AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
139 AssertCompileMemberAlignment(VM, selm.s, 32); AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
140#if 0 /* doesn't work */
141 AssertCompile((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
142 AssertCompile((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
143#endif
144 AssertRelease((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
145 AssertRelease((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
146 AssertRelease(sizeof(pVM->selm.s.Tss.IntRedirBitmap) == 0x20);
147
148 /*
149 * Init the structure.
150 */
151 pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
152 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = (SELM_GDT_ELEMENTS - 0x1) << 3;
153 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = (SELM_GDT_ELEMENTS - 0x2) << 3;
154 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = (SELM_GDT_ELEMENTS - 0x3) << 3;
155 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = (SELM_GDT_ELEMENTS - 0x4) << 3;
156 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;
157
158 /*
159 * Allocate GDT table.
160 */
161 int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
162 PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3);
163 AssertRCReturn(rc, rc);
164
165 /*
166 * Allocate LDT area.
167 */
168 rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
169 AssertRCReturn(rc, rc);
170
171 /*
172 * Init Guest's and Shadow GDT, LDT, TSS changes control variables.
173 */
174 pVM->selm.s.cbEffGuestGdtLimit = 0;
175 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
176 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
177 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
178
179 pVM->selm.s.paGdtRC = NIL_RTRCPTR; /* Must be set in SELMR3Relocate because of monitoring. */
180 pVM->selm.s.pvLdtRC = RTRCPTR_MAX;
181 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
182 pVM->selm.s.GCSelTss = RTSEL_MAX;
183
184 pVM->selm.s.fDisableMonitoring = false;
185 pVM->selm.s.fSyncTSSRing0Stack = false;
186
187 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
188 * for I/O operations. */
189 pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
190 /* bit set to 1 means no redirection */
191 memset(pVM->selm.s.Tss.IntRedirBitmap, 0xff, sizeof(pVM->selm.s.Tss.IntRedirBitmap));
192
193 /*
194 * Register the saved state data unit.
195 */
196 rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
197 NULL, NULL, NULL,
198 NULL, selmR3Save, NULL,
199 NULL, selmR3Load, selmR3LoadDone);
200 if (RT_FAILURE(rc))
201 return rc;
202
203 /*
204 * Statistics.
205 */
206 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
207 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
208 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of detected writes to the Guest LDT.");
209 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
210 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
211 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
212 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
213 STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
214 STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
215
216 STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
217 STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had to find free hypervisor selectors.");
218
219 /*
220 * Default action when entering raw mode for the first time
221 */
222 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
223 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
224 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
225 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
226
227 /*
228 * Register info handlers.
229 */
230 DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
231 DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
232 DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
233 DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
234 //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
235 //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);
236
237 return rc;
238}
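/* Editor's note: a sketch of how the virtual interrupt redirection bitmap
 * initialized in SELMR3Init() is meant to be consulted - one bit per vector,
 * where a set bit means "no redirection". The helper is hypothetical and not
 * compiled; only the bit layout is taken from the initialization above.
 */
#if 0 /* illustrative sketch only */
static bool selmExampleIsIntRedirected(VBOXTSS const *pTss, uint8_t u8Vector)
{
    return !(pTss->IntRedirBitmap[u8Vector >> 3] & (1 << (u8Vector & 7)));
}
#endif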
239
240
241/**
242 * Finalizes HMA page attributes.
243 *
244 * @returns VBox status code.
245 * @param pVM The VM handle.
246 */
247VMMR3DECL(int) SELMR3InitFinalize(PVM pVM)
248{
249 /** @cfgm{/DoubleFault,bool,false}
250 * Enables catching of double faults in the raw-mode context VMM code. This can
251 * be used when triple faults or hangs occur and one suspects an unhandled
252 * double fault. This is not enabled by default because it means making the
253 * hyper selectors writeable for all supervisor code, including the guest's.
254 * The double fault is a task switch and thus requires write access to the
255 * GDT entry of the TSS (to set it busy), to the old TSS (to store state),
256 * and to the Trap 8 TSS for the back link.
257 */
258 bool f;
259#if defined(DEBUG_bird)
260 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true);
261#else
262 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false);
263#endif
264 AssertLogRelRCReturn(rc, rc);
265 if (f)
266 {
267 PX86DESC paGdt = pVM->selm.s.paGdtR3;
268 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
269 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
270 AssertRC(rc);
271 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3]), sizeof(paGdt[0]),
272 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
273 AssertRC(rc);
274 rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]),
275 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
276 AssertRC(rc);
277 rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]),
278 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
279 AssertRC(rc);
280 }
281 return VINF_SUCCESS;
282}
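/* Editor's note: a hedged usage sketch for the /DoubleFault CFGM key
 * documented above. Assuming the standard CFGM API, the key could be set
 * under the CFGM root before SELMR3InitFinalize() runs; this block is
 * illustrative only and not compiled.
 */
#if 0 /* illustrative sketch only */
static int selmExampleEnableDoubleFaultCatching(PVM pVM)
{
    /* Insert "DoubleFault" = true so the CFGMR3QueryBoolDef() call above
       picks it up and makes the hyper selectors writable. */
    return CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "DoubleFault", 1);
}
#endif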
283
284
285/**
286 * Sets up the hypervisor GDT selectors in our shadow table.
287 *
288 * @param pVM The VM handle.
289 */
290static void selmR3SetupHyperGDTSelectors(PVM pVM)
291{
292 PX86DESC paGdt = pVM->selm.s.paGdtR3;
293
294 /*
295 * Set up global code and data descriptors for use in the guest context.
296 * Both are wide open (base 0, limit 4GB)
297 */
298 PX86DESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> 3];
299 pDesc->Gen.u16LimitLow = 0xffff;
300 pDesc->Gen.u4LimitHigh = 0xf;
301 pDesc->Gen.u16BaseLow = 0;
302 pDesc->Gen.u8BaseHigh1 = 0;
303 pDesc->Gen.u8BaseHigh2 = 0;
304 pDesc->Gen.u4Type = X86_SEL_TYPE_ER_ACC;
305 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
306 pDesc->Gen.u2Dpl = 0; /* supervisor */
307 pDesc->Gen.u1Present = 1;
308 pDesc->Gen.u1Available = 0;
309 pDesc->Gen.u1Long = 0;
310 pDesc->Gen.u1DefBig = 1; /* def 32 bit */
311 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
312
313 /* data */
314 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> 3];
315 pDesc->Gen.u16LimitLow = 0xffff;
316 pDesc->Gen.u4LimitHigh = 0xf;
317 pDesc->Gen.u16BaseLow = 0;
318 pDesc->Gen.u8BaseHigh1 = 0;
319 pDesc->Gen.u8BaseHigh2 = 0;
320 pDesc->Gen.u4Type = X86_SEL_TYPE_RW_ACC;
321 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
322 pDesc->Gen.u2Dpl = 0; /* supervisor */
323 pDesc->Gen.u1Present = 1;
324 pDesc->Gen.u1Available = 0;
325 pDesc->Gen.u1Long = 0;
326 pDesc->Gen.u1DefBig = 1; /* big */
327 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
328
329 /* 64-bit mode code (& data?) */
330 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> 3];
331 pDesc->Gen.u16LimitLow = 0xffff;
332 pDesc->Gen.u4LimitHigh = 0xf;
333 pDesc->Gen.u16BaseLow = 0;
334 pDesc->Gen.u8BaseHigh1 = 0;
335 pDesc->Gen.u8BaseHigh2 = 0;
336 pDesc->Gen.u4Type = X86_SEL_TYPE_ER_ACC;
337 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
338 pDesc->Gen.u2Dpl = 0; /* supervisor */
339 pDesc->Gen.u1Present = 1;
340 pDesc->Gen.u1Available = 0;
341 pDesc->Gen.u1Long = 1; /* The Long (L) attribute bit. */
342 pDesc->Gen.u1DefBig = 0; /* With L=1 this must be 0. */
343 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
344
345 /*
346 * TSS descriptor
347 */
348 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
349 RTRCPTR RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
350 pDesc->Gen.u16BaseLow = RT_LOWORD(RCPtrTSS);
351 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(RCPtrTSS);
352 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(RCPtrTSS);
353 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
354 pDesc->Gen.u4LimitHigh = 0;
355 pDesc->Gen.u4Type = X86_SEL_TYPE_SYS_386_TSS_AVAIL;
356 pDesc->Gen.u1DescType = 0; /* system */
357 pDesc->Gen.u2Dpl = 0; /* supervisor */
358 pDesc->Gen.u1Present = 1;
359 pDesc->Gen.u1Available = 0;
360 pDesc->Gen.u1Long = 0;
361 pDesc->Gen.u1DefBig = 0;
362 pDesc->Gen.u1Granularity = 0; /* byte limit */
363
364 /*
365 * TSS descriptor for trap 08
366 */
367 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3];
368 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
369 pDesc->Gen.u4LimitHigh = 0;
370 RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.TssTrap08);
371 pDesc->Gen.u16BaseLow = RT_LOWORD(RCPtrTSS);
372 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(RCPtrTSS);
373 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(RCPtrTSS);
374 pDesc->Gen.u4Type = X86_SEL_TYPE_SYS_386_TSS_AVAIL;
375 pDesc->Gen.u1DescType = 0; /* system */
376 pDesc->Gen.u2Dpl = 0; /* supervisor */
377 pDesc->Gen.u1Present = 1;
378 pDesc->Gen.u1Available = 0;
379 pDesc->Gen.u1Long = 0;
380 pDesc->Gen.u1DefBig = 0;
381 pDesc->Gen.u1Granularity = 0; /* byte limit */
382}
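/* Editor's note: the inverse of the field stuffing done above, shown as a
 * sketch for clarity. It reconstructs the 32-bit base and the byte limit from
 * a descriptor; the real code uses the X86DESC_BASE()/X86DESC_LIMIT() macros
 * for this (see the LDT sync in SELMR3UpdateFromCPUM()). The helper names are
 * hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
static uint32_t selmExampleDescBase(PCX86DESC pDesc)
{
    return pDesc->Gen.u16BaseLow
         | ((uint32_t)pDesc->Gen.u8BaseHigh1 << 16)
         | ((uint32_t)pDesc->Gen.u8BaseHigh2 << 24);
}

static uint32_t selmExampleDescLimit(PCX86DESC pDesc)
{
    uint32_t uLimit = pDesc->Gen.u16LimitLow | ((uint32_t)pDesc->Gen.u4LimitHigh << 16);
    /* With the granularity bit set, the limit is in units of 4KB pages. */
    return pDesc->Gen.u1Granularity ? (uLimit << 12) | 0xfff : uLimit;
}
#endif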
383
384/**
385 * Applies relocations to data and code managed by this
386 * component. This function will be called at init and
387 * whenever the VMM needs to relocate itself inside the GC.
388 *
389 * @param pVM The VM.
390 */
391VMMR3DECL(void) SELMR3Relocate(PVM pVM)
392{
393 PX86DESC paGdt = pVM->selm.s.paGdtR3;
394 LogFlow(("SELMR3Relocate\n"));
395
396 for (VMCPUID i = 0; i < pVM->cCpus; i++)
397 {
398 PVMCPU pVCpu = &pVM->aCpus[i];
399
400 /*
401 * Update GDTR and selector.
402 */
403 CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
404
405 /** @todo selector relocations should be a separate operation? */
406 CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
407 CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
408 CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
409 CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
410 CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
411 }
412
413 selmR3SetupHyperGDTSelectors(pVM);
414
415/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
416/** @todo PGM knows the proper CR3 values these days, not CPUM. */
417 /*
418 * Update the TSSes.
419 */
420 /* Only applies to raw mode, which supports only 1 VCPU. */
421 PVMCPU pVCpu = &pVM->aCpus[0];
422
423 /* Current TSS */
424 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
425 pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
426 pVM->selm.s.Tss.esp0 = VMMGetStackRC(pVCpu);
427 pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
428 pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
429 pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
430 pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
431
432 /* trap 08 */
433 pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give us better survival chances. */
434 pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
435 pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
436 pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
437 pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
438 pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
439 pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
440 pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
441 pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
442 pVM->selm.s.TssTrap08.fs = 0;
443 pVM->selm.s.TssTrap08.gs = 0;
444 pVM->selm.s.TssTrap08.selLdt = 0;
445 pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
446 pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
447 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
448 pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
449 pVM->selm.s.TssTrap08.edx = VM_RC_ADDR(pVM, pVM); /* setup edx VM address. */
450 pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
451 pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
452 pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
453 /* TRPM will be updating the eip */
454
455 if ( !pVM->selm.s.fDisableMonitoring
456 && !VMMIsHwVirtExtForced(pVM))
457 {
458 /*
459 * Update shadow GDT/LDT/TSS write access handlers.
460 */
461 int rc;
462#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
463 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
464 {
465 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC);
466 AssertRC(rc);
467 }
468 pVM->selm.s.paGdtRC = MMHyperR3ToRC(pVM, paGdt);
469 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtRC,
470 pVM->selm.s.paGdtRC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
471 0, 0, "selmRCShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
472 AssertRC(rc);
473#endif
474#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
475 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX)
476 {
477 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC);
478 AssertRC(rc);
479 }
480 pVM->selm.s.pvMonShwTssRC = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
481 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvMonShwTssRC,
482 pVM->selm.s.pvMonShwTssRC + sizeof(pVM->selm.s.Tss) - 1,
483 0, 0, "selmRCShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
484 AssertRC(rc);
485#endif
486
487 /*
488 * Update the GC LDT region handler and address.
489 */
490#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
491 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX)
492 {
493 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC);
494 AssertRC(rc);
495 }
496#endif
497 pVM->selm.s.pvLdtRC = MMHyperR3ToRC(pVM, pVM->selm.s.pvLdtR3);
498#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
499 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvLdtRC,
500 pVM->selm.s.pvLdtRC + _64K + PAGE_SIZE - 1,
501 0, 0, "selmRCShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
502 AssertRC(rc);
503#endif
504 }
505}
506
507
508/**
509 * Terminates the SELM.
510 *
511 * Termination means cleaning up and freeing all resources,
512 * the VM itself is at this point powered off or suspended.
513 *
514 * @returns VBox status code.
515 * @param pVM The VM to operate on.
516 */
517VMMR3DECL(int) SELMR3Term(PVM pVM)
518{
519 return 0;
520}
521
522
523/**
524 * The VM is being reset.
525 *
526 * For the SELM component this means that any GDT/LDT/TSS monitors
527 * need to be removed.
528 *
529 * @param pVM VM handle.
530 */
531VMMR3DECL(void) SELMR3Reset(PVM pVM)
532{
533 LogFlow(("SELMR3Reset:\n"));
534 VM_ASSERT_EMT(pVM);
535
536 /*
537 * Uninstall guest GDT/LDT/TSS write access handlers.
538 */
539 int rc;
540#ifdef SELM_TRACK_GUEST_GDT_CHANGES
541 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
542 {
543 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
544 AssertRC(rc);
545 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
546 pVM->selm.s.GuestGdtr.cbGdt = 0;
547 }
548 pVM->selm.s.fGDTRangeRegistered = false;
549#endif
550#ifdef SELM_TRACK_GUEST_LDT_CHANGES
551 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
552 {
553 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
554 AssertRC(rc);
555 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
556 }
557#endif
558#ifdef SELM_TRACK_GUEST_TSS_CHANGES
559 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
560 {
561 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
562 AssertRC(rc);
563 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
564 pVM->selm.s.GCSelTss = RTSEL_MAX;
565 }
566#endif
567
568 /*
569 * Re-initialize other members.
570 */
571 pVM->selm.s.cbLdtLimit = 0;
572 pVM->selm.s.offLdtHyper = 0;
573 pVM->selm.s.cbMonitoredGuestTss = 0;
574
575 pVM->selm.s.fSyncTSSRing0Stack = false;
576
577 /*
578 * Default action when entering raw mode for the first time
579 */
580 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
581 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
583 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
584}
585
586/**
587 * Disables GDT/LDT/TSS monitoring and syncing.
588 *
589 * @param pVM The VM to operate on.
590 */
591VMMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
592{
593 /*
594 * Uninstall guest GDT/LDT/TSS write access handlers.
595 */
596 int rc;
597#ifdef SELM_TRACK_GUEST_GDT_CHANGES
598 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
599 {
600 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
601 AssertRC(rc);
602 pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
603 pVM->selm.s.GuestGdtr.cbGdt = 0;
604 }
605 pVM->selm.s.fGDTRangeRegistered = false;
606#endif
607#ifdef SELM_TRACK_GUEST_LDT_CHANGES
608 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
609 {
610 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
611 AssertRC(rc);
612 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
613 }
614#endif
615#ifdef SELM_TRACK_GUEST_TSS_CHANGES
616 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
617 {
618 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
619 AssertRC(rc);
620 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
621 pVM->selm.s.GCSelTss = RTSEL_MAX;
622 }
623#endif
624
625 /*
626 * Unregister shadow GDT/LDT/TSS write access handlers.
627 */
628#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
629 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
630 {
631 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC);
632 AssertRC(rc);
633 pVM->selm.s.paGdtRC = NIL_RTRCPTR;
634 }
635#endif
636#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
637 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX)
638 {
639 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC);
640 AssertRC(rc);
641 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
642 }
643#endif
644#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
645 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX)
646 {
647 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC);
648 AssertRC(rc);
649 pVM->selm.s.pvLdtRC = RTRCPTR_MAX;
650 }
651#endif
652
653 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies one VCPU */
654 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
656 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
657
658 pVM->selm.s.fDisableMonitoring = true;
659}
660
661
662/**
663 * Execute state save operation.
664 *
665 * @returns VBox status code.
666 * @param pVM VM Handle.
667 * @param pSSM SSM operation handle.
668 */
669static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
670{
671 LogFlow(("selmR3Save:\n"));
672
673 /*
674 * Save the basic bits - fortunately all the other things can be resynced on load.
675 */
676 PSELM pSelm = &pVM->selm.s;
677
678 SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
679 SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
680 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS]);
681 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]);
682 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]);
683 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); /* reserved for DS64. */
684 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS]);
685 return SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]);
686}
687
688
689/**
690 * Execute state load operation.
691 *
692 * @returns VBox status code.
693 * @param pVM VM Handle.
694 * @param pSSM SSM operation handle.
695 * @param uVersion Data layout version.
696 * @param uPass The data pass.
697 */
698static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
699{
700 LogFlow(("selmR3Load:\n"));
701 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
702
703 /*
704 * Validate version.
705 */
706 if (uVersion != SELM_SAVED_STATE_VERSION)
707 {
708 AssertMsgFailed(("selmR3Load: Invalid version uVersion=%d!\n", uVersion));
709 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
710 }
711
712 /*
713 * Do a reset.
714 */
715 SELMR3Reset(pVM);
716
717 /* Get the monitoring flag. */
718 SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);
719
720 /* Get the TSS state flag. */
721 SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);
722
723 /*
724 * Get the selectors.
725 */
726 RTSEL SelCS;
727 SSMR3GetSel(pSSM, &SelCS);
728 RTSEL SelDS;
729 SSMR3GetSel(pSSM, &SelDS);
730 RTSEL SelCS64;
731 SSMR3GetSel(pSSM, &SelCS64);
732 RTSEL SelDS64;
733 SSMR3GetSel(pSSM, &SelDS64);
734 RTSEL SelTSS;
735 SSMR3GetSel(pSSM, &SelTSS);
736 RTSEL SelTSSTrap08;
737 SSMR3GetSel(pSSM, &SelTSSTrap08);
738
739 /* Copy the selectors; they will be checked during relocation. */
740 PSELM pSelm = &pVM->selm.s;
741 pSelm->aHyperSel[SELM_HYPER_SEL_CS] = SelCS;
742 pSelm->aHyperSel[SELM_HYPER_SEL_DS] = SelDS;
743 pSelm->aHyperSel[SELM_HYPER_SEL_CS64] = SelCS64;
744 pSelm->aHyperSel[SELM_HYPER_SEL_TSS] = SelTSS;
745 pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SelTSSTrap08;
746
747 return VINF_SUCCESS;
748}
749
750
751/**
752 * Sync the GDT, LDT and TSS after loading the state.
753 *
754 * Just to play it safe, we set the FFs to force syncing before
755 * executing GC code.
756 *
757 * @returns VBox status code.
758 * @param pVM VM Handle.
759 * @param pSSM SSM operation handle.
760 */
761static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
762{
763 PVMCPU pVCpu = VMMGetCpu(pVM);
764
765 LogFlow(("selmR3LoadDone:\n"));
766
767 /*
768 * Don't do anything if it's a load failure.
769 */
770 int rc = SSMR3HandleGetStatus(pSSM);
771 if (RT_FAILURE(rc))
772 return VINF_SUCCESS;
773
774 /*
775 * Do the syncing if we're in protected mode.
776 */
777 if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
778 {
779 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
780 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
781 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
782 SELMR3UpdateFromCPUM(pVM, pVCpu);
783 }
784
785 /*
786 * Flag everything for resync on next raw mode entry.
787 */
788 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
789 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
790 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
791
792 return VINF_SUCCESS;
793}
794
795
796/**
797 * Updates the Guest GDT & LDT virtualization based on current CPU state.
798 *
799 * @returns VBox status code.
800 * @param pVM The VM to operate on.
801 * @param pVCpu The VMCPU to operate on.
802 */
803VMMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
804{
805 int rc = VINF_SUCCESS;
806
807 if (pVM->selm.s.fDisableMonitoring)
808 {
809 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
810 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
811 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
812
813 return VINF_SUCCESS;
814 }
815
816 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
817
818 /*
819 * GDT sync
820 */
821 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
822 {
823 /*
824 * Always assume the best
825 */
826 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
827
828 /* If the GDT was changed, then make sure the LDT is checked too */
829 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
830 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
831 /* Same goes for the TSS selector */
832 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
833
834 /*
835 * Get the GDTR and check if there is anything to do (there usually is).
836 */
837 VBOXGDTR GDTR;
838 CPUMGetGuestGDTR(pVCpu, &GDTR);
839 if (GDTR.cbGdt < sizeof(X86DESC))
840 {
841 Log(("No GDT entries...\n"));
842 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
843 return VINF_SUCCESS;
844 }
845
846 /*
847 * Read the Guest GDT.
848 * ASSUMES that the entire GDT is in memory.
849 */
850 RTUINT cbEffLimit = GDTR.cbGdt;
851 PX86DESC pGDTE = &pVM->selm.s.paGdtR3[1];
852 rc = PGMPhysSimpleReadGCPtr(pVCpu, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC));
853 if (RT_FAILURE(rc))
854 {
855 /*
856 * Read it page by page.
857 *
858 * Keep track of the last valid page and delay memsets and
859 * adjust cbEffLimit to reflect the effective size. The latter
860 * is something we do in the belief that the guest will probably
861 * never actually commit the last page, thus allowing us to keep
862 * our selectors in the high end of the GDT.
863 */
864 RTUINT cbLeft = cbEffLimit + 1 - sizeof(X86DESC);
865 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(X86DESC);
866 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtR3[1];
867 uint8_t *pu8DstInvalid = pu8Dst;
868
869 while (cbLeft)
870 {
871 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
872 cb = RT_MIN(cb, cbLeft);
873 rc = PGMPhysSimpleReadGCPtr(pVCpu, pu8Dst, GCPtrSrc, cb);
874 if (RT_SUCCESS(rc))
875 {
876 if (pu8DstInvalid != pu8Dst)
877 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
878 GCPtrSrc += cb;
879 pu8Dst += cb;
880 pu8DstInvalid = pu8Dst;
881 }
882 else if ( rc == VERR_PAGE_NOT_PRESENT
883 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
884 {
885 GCPtrSrc += cb;
886 pu8Dst += cb;
887 }
888 else
889 {
890 AssertReleaseMsgFailed(("Couldn't read GDT at %016RX64, rc=%Rrc!\n", GDTR.pGdt, rc));
891 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
892 return VERR_NOT_IMPLEMENTED;
893 }
894 cbLeft -= cb;
895 }
896
897 /* any invalid pages at the end? */
898 if (pu8DstInvalid != pu8Dst)
899 {
900 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtR3 - 1;
901 /* If any GDTEs were invalidated, zero them. */
902 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
903 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
904 }
905
906 /* keep track of the effective limit. */
907 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
908 {
909 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
910 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
911 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
912 }
913 }
914
915 /*
916 * Check if the Guest GDT intrudes on our GDT entries.
917 */
918 /** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
919 RTSEL aHyperSel[SELM_HYPER_SEL_MAX];
920 if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
921 {
922 PX86DESC pGDTEStart = pVM->selm.s.paGdtR3;
923 PX86DESC pGDTECur = (PX86DESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(X86DESC));
924 int iGDT = 0;
925
926 Log(("Internal SELM GDT conflict: use non-present entries\n"));
927 STAM_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
928 while (pGDTECur > pGDTEStart)
929 {
930 /* We can reuse non-present entries */
931 if (!pGDTECur->Gen.u1Present)
932 {
933 aHyperSel[iGDT] = ((uintptr_t)pGDTECur - (uintptr_t)pVM->selm.s.paGdtR3) / sizeof(X86DESC);
934 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT;
935 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT]));
936 iGDT++;
937 if (iGDT >= SELM_HYPER_SEL_MAX)
938 break;
939 }
940
941 pGDTECur--;
942 }
943 if (iGDT != SELM_HYPER_SEL_MAX)
944 {
945 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
946 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
947 return VERR_NOT_IMPLEMENTED;
948 }
949 }
950 else
951 {
952 aHyperSel[SELM_HYPER_SEL_CS] = SELM_HYPER_DEFAULT_SEL_CS;
953 aHyperSel[SELM_HYPER_SEL_DS] = SELM_HYPER_DEFAULT_SEL_DS;
954 aHyperSel[SELM_HYPER_SEL_CS64] = SELM_HYPER_DEFAULT_SEL_CS64;
955 aHyperSel[SELM_HYPER_SEL_TSS] = SELM_HYPER_DEFAULT_SEL_TSS;
956 aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
957 }
958
959 /*
960 * Work through the copied GDT entries, adjusting them for correct virtualization.
961 */
962 PX86DESC pGDTEEnd = (PX86DESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(X86DESC));
963 while (pGDTE < pGDTEEnd)
964 {
965 if (pGDTE->Gen.u1Present)
966 {
967 /*
968 * Code and data selectors are generally 1:1, with the
969 * 'little' adjustment we do for DPL 0 selectors.
970 */
971 if (pGDTE->Gen.u1DescType)
972 {
973 /*
974 * Hack for A-bit against Trap E on read-only GDT.
975 */
976 /** @todo Fix this by loading ds and cs before turning off WP. */
977 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
978
979 /*
980 * All DPL 0 code and data segments are squeezed into DPL 1.
981 *
982 * We're skipping conforming segments here because those
983 * cannot give us any trouble.
984 */
985 if ( pGDTE->Gen.u2Dpl == 0
986 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
987 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
988 pGDTE->Gen.u2Dpl = 1;
989 }
990 else
991 {
992 /*
993 * System type selectors are marked not present.
994 * Recompiler or special handling is required for these.
995 */
996 /** @todo what about interrupt gates and rawr0? */
997 pGDTE->Gen.u1Present = 0;
998 }
999 }
1000
1001 /* Next GDT entry. */
1002 pGDTE++;
1003 }
1004
1005 /*
1006 * Check if our hypervisor selectors were changed.
1007 */
1008 if ( aHyperSel[SELM_HYPER_SEL_CS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]
1009 || aHyperSel[SELM_HYPER_SEL_DS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]
1010 || aHyperSel[SELM_HYPER_SEL_CS64] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]
1011 || aHyperSel[SELM_HYPER_SEL_TSS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]
1012 || aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08])
1013 {
1014 /* Reinitialize our hypervisor GDTs */
1015 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = aHyperSel[SELM_HYPER_SEL_CS];
1016 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = aHyperSel[SELM_HYPER_SEL_DS];
1017 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = aHyperSel[SELM_HYPER_SEL_CS64];
1018 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = aHyperSel[SELM_HYPER_SEL_TSS];
1019 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1020
1021 STAM_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
1022
1023 /*
1024 * Do the relocation callbacks to let everyone update their hyper selector dependencies.
1025 * (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
1026 */
1027 VMR3Relocate(pVM, 0);
1028 }
1029 else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
1030 /* We overwrote all entries above, so we have to save them again. */
1031 selmR3SetupHyperGDTSelectors(pVM);
1032
1033 /*
1034 * Adjust the cached GDT limit.
1035 * Any GDT entries which have been removed must be cleared.
1036 */
1037 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
1038 {
1039 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
1040 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
1041#ifndef SELM_TRACK_GUEST_GDT_CHANGES
1042 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
1043#endif
1044 }
1045
1046#ifdef SELM_TRACK_GUEST_GDT_CHANGES
1047 /*
1048 * Check if the Guest's GDTR has changed.
1049 */
1050 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
1051 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1052 {
1053 Log(("SELMR3UpdateFromCPUM: Guest's GDT has changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
1054
1055 /*
1056 * [Re]Register write virtual handler for guest's GDT.
1057 */
1058 if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
1059 {
1060 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
1061 AssertRC(rc);
1062 }
1063
1064 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
1065 0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0, "Guest GDT write access handler");
1066 if (RT_FAILURE(rc))
1067 return rc;
1068
1069 /* Update saved Guest GDTR. */
1070 pVM->selm.s.GuestGdtr = GDTR;
1071 pVM->selm.s.fGDTRangeRegistered = true;
1072 }
1073#endif
1074 }
1075
1076 /*
1077 * TSS sync
1078 */
1079 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1080 {
1081 SELMR3SyncTSS(pVM, pVCpu);
1082 }
1083
1084 /*
1085 * LDT sync
1086 */
1087 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
1088 {
1089 /*
1090 * Always assume the best
1091 */
1092 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
1093
1094 /*
1095 * LDT handling is done similarly to the GDT handling with a shadow
1096 * array. However, since the LDT is expected to be swappable (at least
1097 * some ancient OSes make it swappable) it must be floating and
1098 * synced on a per-page basis.
1099 *
1100 * Eventually we will change this to be fully on demand. Meaning that
1101 * we will only sync pages containing LDT selectors actually used and
1102 * let the #PF handler lazily sync pages as they are used.
1103 * (This applies to GDT too, when we start making OS/2 fast.)
1104 */
1105
1106 /*
1107 * First, determine the current LDT selector.
1108 */
1109 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
1110 if ((SelLdt & X86_SEL_MASK) == 0)
1111 {
1112 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1113 CPUMSetHyperLDTR(pVCpu, 0);
1114#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1115 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1116 {
1117 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1118 AssertRC(rc);
1119 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1120 }
1121#endif
1122 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1123 return VINF_SUCCESS;
1124 }
1125
1126 /*
1127 * Get the LDT descriptor.
1128 */
1129 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
1130 RTGCPTR GCPtrLdt = X86DESC_BASE(*pDesc);
1131 unsigned cbLdt = X86DESC_LIMIT(*pDesc);
1132 if (pDesc->Gen.u1Granularity)
1133 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1134
1135 /*
1136 * Validate it.
1137 */
1138 if ( !cbLdt
1139 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1140 || pDesc->Gen.u1DescType
1141 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1142 {
1143 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1144
1145 /* cbLdt > 0:
1146 * This is quite impossible, so we do as most people do when faced with
1147 * the impossible: we simply ignore it.
1148 */
1149 CPUMSetHyperLDTR(pVCpu, 0);
1150#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1151 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1152 {
1153 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1154 AssertRC(rc);
1155 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1156 }
1157#endif
1158 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1159 return VINF_SUCCESS;
1160 }
1161 /** @todo check what intel does about odd limits. */
1162 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1163
1164 /*
1165 * Use the cached guest ldt address if the descriptor has already been modified (see below)
1166 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1167 */
1168 if (MMHyperIsInsideArea(pVM, GCPtrLdt))
1169 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1170
1171
1172#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1173 /** @todo Handle only present LDT segments. */
1174 // if (pDesc->Gen.u1Present)
1175 {
1176 /*
1177 * Check if Guest's LDT address/limit is changed.
1178 */
1179 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1180 || cbLdt != pVM->selm.s.cbLdtLimit)
1181 {
1182 Log(("SELMR3UpdateFromCPUM: Guest LDT changed from %RGv:%04x to %RGv:%04x. (GDTR=%016RX64:%04x)\n",
1183 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1184
1185 /*
1186 * [Re]Register write virtual handler for guest's LDT.
1187 * In the event of the LDT overlapping something, don't install it; just assume it's being updated.
1188 */
1189 if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
1190 {
1191 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1192 AssertRC(rc);
1193 }
1194#ifdef DEBUG
1195 if (!pDesc->Gen.u1Present)
1196 Log(("LDT selector marked not present!!\n"));
1197#endif
1198 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1199 0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1200 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1201 {
1202 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1203 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
1204 Log(("WARNING: Guest LDT (%RGv:%04x) conflicted with existing access range!! Assuming the LDT is being updated. (GDTR=%016RX64:%04x)\n",
1205 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1206 }
1207 else if (RT_SUCCESS(rc))
1208 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1209 else
1210 {
1211 CPUMSetHyperLDTR(pVCpu, 0);
1212 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1213 return rc;
1214 }
1215
1216 pVM->selm.s.cbLdtLimit = cbLdt;
1217 }
1218 }
1219#else
1220 pVM->selm.s.cbLdtLimit = cbLdt;
1221#endif
1222
1223 /*
1224 * Calc Shadow LDT base.
1225 */
1226 unsigned off;
1227 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1228 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.pvLdtRC + off);
1229 PX86DESC pShadowLDT = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1230
1231 /*
1232 * Enable the LDT selector in the shadow GDT.
1233 */
1234 pDesc->Gen.u1Present = 1;
1235 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1236 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1237 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1238 pDesc->Gen.u1Available = 0;
1239 pDesc->Gen.u1Long = 0;
1240 if (cbLdt > 0xffff)
1241 {
1242 cbLdt = 0xffff;
1243 pDesc->Gen.u4LimitHigh = 0;
1244 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1245 }
1246
1247 /*
1248 * Set Hyper LDTR and notify TRPM.
1249 */
1250 CPUMSetHyperLDTR(pVCpu, SelLdt);
1251
1252 /*
1253 * Loop synchronising the LDT page by page.
1254 */
1255 /** @todo investigate how Intel handles various operations on half-present cross-page entries. */
1256 off = GCPtrLdt & (sizeof(X86DESC) - 1);
1257 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1258
1259 /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
1260 unsigned cbLeft = cbLdt + 1;
1261 PX86DESC pLDTE = pShadowLDT;
1262 while (cbLeft)
1263 {
1264 /*
1265 * Read a chunk.
1266 */
1267 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1268 if (cbChunk > cbLeft)
1269 cbChunk = cbLeft;
1270 rc = PGMPhysSimpleReadGCPtr(pVCpu, pShadowLDT, GCPtrLdt, cbChunk);
1271 if (RT_SUCCESS(rc))
1272 {
1273 /*
1274 * Mark page
1275 */
1276 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1277 AssertRC(rc);
1278
1279 /*
1280 * Loop thru the available LDT entries.
1281 * Figuring out where to start and end, and the potential cross-pageness
1282 * of things, adds a little complexity. pLDTE is updated here and not in
1283 * the 'next' part of the loop. The pLDTEEnd is inclusive.
1284 */
1285 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1286 if (pLDTE + 1 < pShadowLDT)
1287 pLDTE = (PX86DESC)((uintptr_t)pShadowLDT + off);
1288 while (pLDTE <= pLDTEEnd)
1289 {
1290 if (pLDTE->Gen.u1Present)
1291 {
1292 /*
1293 * Code and data selectors are generally 1:1, with the
1294 * 'little' adjustment we do for DPL 0 selectors.
1295 */
1296 if (pLDTE->Gen.u1DescType)
1297 {
1298 /*
1299 * Hack for A-bit against Trap E on read-only GDT.
1300 */
1301 /** @todo Fix this by loading ds and cs before turning off WP. */
1302 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1303 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1304
1305 /*
1306 * All DPL 0 code and data segments are squeezed into DPL 1.
1307 *
1308 * We're skipping conforming segments here because those
1309 * cannot give us any trouble.
1310 */
1311 if ( pLDTE->Gen.u2Dpl == 0
1312 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1313 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1314 pLDTE->Gen.u2Dpl = 1;
1315 }
1316 else
1317 {
1318 /*
1319 * System type selectors are marked not present.
1320 * Recompiler or special handling is required for these.
1321 */
1322 /** @todo what about interrupt gates and rawr0? */
1323 pLDTE->Gen.u1Present = 0;
1324 }
1325 }
1326
1327 /* Next LDT entry. */
1328 pLDTE++;
1329 }
1330 }
1331 else
1332 {
1333 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc));
1334 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1335 AssertRC(rc);
1336 }
1337
1338 /*
1339 * Advance to the next page.
1340 */
1341 cbLeft -= cbChunk;
1342 GCPtrShadowLDT += cbChunk;
1343 pShadowLDT = (PX86DESC)((char *)pShadowLDT + cbChunk);
1344 GCPtrLdt += cbChunk;
1345 }
1346 }
1347
1348 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1349 return VINF_SUCCESS;
1350}
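/* Editor's note: the DPL squeezing rule applied in both the GDT and the LDT
 * loops above, factored into a standalone sketch for clarity. Any present,
 * non-conforming DPL 0 code or data descriptor is pushed to DPL 1 so that
 * ring 0 stays reserved for the hypervisor. The helper is hypothetical and
 * the block is not compiled.
 */
#if 0 /* illustrative sketch only */
static void selmExampleSqueezeDpl(PX86DESC pDesc)
{
    if (    pDesc->Gen.u1Present
        &&  pDesc->Gen.u1DescType /* code/data, not system */
        &&  pDesc->Gen.u2Dpl == 0
        &&     (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
        pDesc->Gen.u2Dpl = 1;
}
#endif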
1351
1352
1353/**
1354 * \#PF Handler callback for virtual access handler ranges.
1355 *
1356 * Important to realize that a physical page in a range can have aliases, and
1357 * for ALL and WRITE handlers these will also trigger.
1358 *
1359 * @returns VINF_SUCCESS if the handler has carried out the operation.
1360 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1361 * @param pVM VM Handle.
1362 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1363 * @param pvPtr The HC mapping of that address.
1364 * @param pvBuf What the guest is reading/writing.
1365 * @param cbBuf How much it's reading/writing.
1366 * @param enmAccessType The access type.
1367 * @param pvUser User argument.
1368 */
1369static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1370{
1371 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1372 Log(("selmR3GuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
1373
1374 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_GDT);
1375 return VINF_PGM_HANDLER_DO_DEFAULT;
1376}
1377
1378
1379/**
1380 * \#PF Handler callback for virtual access handler ranges.
1381 *
1382 * Important to realize that a physical page in a range can have aliases, and
1383 * for ALL and WRITE handlers these will also trigger.
1384 *
1385 * @returns VINF_SUCCESS if the handler has carried out the operation.
1386 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1387 * @param pVM VM Handle.
1388 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1389 * @param pvPtr The HC mapping of that address.
1390 * @param pvBuf What the guest is reading/writing.
1391 * @param cbBuf How much it's reading/writing.
1392 * @param enmAccessType The access type.
1393 * @param pvUser User argument.
1394 */
1395static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1396{
1397 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1398 Log(("selmR3GuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
1399 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_LDT);
1400 return VINF_PGM_HANDLER_DO_DEFAULT;
1401}
1402
1403
1404/**
1405 * \#PF Handler callback for virtual access handler ranges.
1406 *
1407 * Important to realize that a physical page in a range can have aliases, and
1408 * for ALL and WRITE handlers these will also trigger.
1409 *
1410 * @returns VINF_SUCCESS if the handler has carried out the operation.
1411 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1412 * @param pVM VM Handle.
1413 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1414 * @param pvPtr The HC mapping of that address.
1415 * @param pvBuf What the guest is reading/writing.
1416 * @param cbBuf How much it's reading/writing.
1417 * @param enmAccessType The access type.
1418 * @param pvUser User argument.
1419 */
1420static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1421{
1422 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1423 Log(("selmR3GuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
1424
1425 /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
1426 * reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
1427 * should probably also deregister the virtual handler if TR.base/size
1428 * changes while we're in REM. */
1429
1430 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_TSS);
1431
1432 return VINF_PGM_HANDLER_DO_DEFAULT;
1433}
1434
1435
1436/**
1437 * Synchronize the shadowed fields in the TSS.
1438 *
1439 * At present we're shadowing the ring-0 stack selector & pointer, and the
1440 * interrupt redirection bitmap (if present). We take the lazy approach wrt
1441 * REM, and this function is called both when REM makes changes to the TSS
1442 * and when it loads TR.
1443 *
1444 * @returns VBox status code.
1445 * @param pVM The VM to operate on.
1446 * @param pVCpu The VMCPU to operate on.
1447 */
1448VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
1449{
1450 int rc;
1451
1452 if (pVM->selm.s.fDisableMonitoring)
1453 {
1454 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1455 return VINF_SUCCESS;
1456 }
1457
1458 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1459 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
1460
1461 /*
1462 * Get TR and extract and store the basic info.
1463 *
1464 * Note! The TSS limit is not checked by the LTR code, so we
1465 * have to be a bit careful with it. We make sure cbTss
1466 * won't be zero if TR is valid, and if TR is NULL we'll
1467 * make sure cbTss is 0.
1468 */
1469 CPUMSELREGHID trHid;
1470 RTSEL SelTss = CPUMGetGuestTR(pVCpu, &trHid);
1471 RTGCPTR GCPtrTss = trHid.u64Base;
1472 uint32_t cbTss = trHid.u32Limit;
1473 Assert( (SelTss & X86_SEL_MASK)
1474 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1475 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1476 if (SelTss & X86_SEL_MASK)
1477 {
1478 Assert(!(SelTss & X86_SEL_LDT));
1479 Assert(trHid.Attr.n.u1DescType == 0);
1480 Assert( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY
1481 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY);
1482 if (!++cbTss)
1483 cbTss = UINT32_MAX;
1484 }
1485 else
1486 {
1487 Assert( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1488 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1489 cbTss = 0; /* the reset case. */
1490 }
1491 pVM->selm.s.cbGuestTss = cbTss;
1492 pVM->selm.s.fGuestTss32Bit = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1493 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1494
1495 /*
1496 * Figure out the size of what we need to monitor.
1497 */
1498 /* We're not interested in any 16-bit TSSes. */
1499 uint32_t cbMonitoredTss = cbTss;
1500 if ( trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1501 && trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
1502 cbMonitoredTss = 0;
1503
1504 pVM->selm.s.offGuestIoBitmap = 0;
1505 bool fNoRing1Stack = true;
1506 if (cbMonitoredTss)
1507 {
1508 /*
1509 * 32-bit TSS. What we're really keen on is the SS0 and ESP0 fields.
1510 * If VME is enabled we also want to keep an eye on the interrupt
1511 * redirection bitmap.
1512 */
1513 VBOXTSS Tss;
1514 uint32_t cr4 = CPUMGetGuestCR4(pVCpu);
1515 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, IntRedirBitmap));
1516 if ( !(cr4 & X86_CR4_VME)
1517 || ( RT_SUCCESS(rc)
1518 && Tss.offIoBitmap < sizeof(VBOXTSS) /* too small */
1519 && Tss.offIoBitmap > cbTss) /* beyond the end */ /** @todo not sure how the partial case is handled; probably not allowed. */
1520 )
1521 /* No interrupt redirection bitmap, just ESP0 and SS0. */
1522 cbMonitoredTss = RT_UOFFSETOF(VBOXTSS, padding_ss0);
1523 else if (RT_SUCCESS(rc))
1524 {
1525 /*
1526 * Everything up to and including the interrupt redirection bitmap. Unfortunately
1527 * this can be quite a large chunk. We used to skip it earlier and just hope it
1528 * was kind of static...
1529 *
1530 * Update the virtual interrupt redirection bitmap while we're here.
1531 * (It is located in the 32 bytes before TR:offIoBitmap.)
1532 */
1533 cbMonitoredTss = Tss.offIoBitmap;
1534 pVM->selm.s.offGuestIoBitmap = Tss.offIoBitmap;
1535
1536 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap);
1537 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pVM->selm.s.Tss.IntRedirBitmap,
1538 GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap));
1539 AssertRC(rc);
1540 /** @todo memset the bitmap on failure? */
1541 Log2(("Redirection bitmap:\n"));
1542 Log2(("%.*Rhxd\n", sizeof(Tss.IntRedirBitmap), &pVM->selm.s.Tss.IntRedirBitmap));
1543 }
1544 else
1545 {
1546 cbMonitoredTss = RT_OFFSETOF(VBOXTSS, IntRedirBitmap);
1547 pVM->selm.s.offGuestIoBitmap = 0;
1548 /** @todo memset the bitmap? */
1549 }
1550
1551 /*
1552 * Update the ring 0 stack selector and base address.
1553 */
1554 if (RT_SUCCESS(rc))
1555 {
1556#ifdef LOG_ENABLED
1557 if (LogIsEnabled())
1558 {
1559 uint32_t ssr0, espr0;
1560 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1561 if ((ssr0 & ~1) != Tss.ss0 || espr0 != Tss.esp0)
1562 {
1563 RTGCPHYS GCPhys = NIL_RTGCPHYS;
1564 rc = PGMGstGetPage(pVCpu, GCPtrTss, NULL, &GCPhys); AssertRC(rc);
1565 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X from %04X:%08X; TSS Phys=%RGp\n",
1566 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys));
1567 AssertMsg(ssr0 != Tss.ss0,
1568 ("ring-1 leak into TSS.SS0! %04X:%08X from %04X:%08X; TSS Phys=%RGp\n",
1569 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys));
1570 }
1571 Log(("offIoBitmap=%#x\n", Tss.offIoBitmap));
1572 }
1573#endif /* LOG_ENABLED */
1574 AssertMsg(!(Tss.ss0 & 3), ("ring-1 leak into TSS.SS0? %04X:%08X\n", Tss.ss0, Tss.esp0));
1575
1576 /* Update our TSS structure for the guest's ring 1 stack */
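        /* Note: SS0 is stored with its RPL raised to 1 because raw-mode runs the
           guest's kernel code at ring 1; ring 0 is reserved for the hypervisor. */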
1577 selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0);
1578 pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false;
1579 }
1580 }
1581
1582 /*
1583 * Flush the ring-1 stack and the direct syscall dispatching if we
1584 * cannot obtain SS0:ESP0.
1585 */
1586 if (fNoRing1Stack)
1587 {
1588 selmSetRing1Stack(pVM, 0 /* invalid SS */, 0);
1589 pVM->selm.s.fSyncTSSRing0Stack = cbMonitoredTss != 0;
1590
1591 /** @todo handle these dependencies better! */
1592 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER);
1593 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER);
1594 }
1595
1596 /*
1597 * Check for monitor changes and apply them.
1598 */
1599 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1600 || cbMonitoredTss != pVM->selm.s.cbMonitoredGuestTss)
1601 {
1602 Log(("SELMR3SyncTSS: Guest's TSS has changed: pTss=%RGv cbMonitoredTss=%08X cbGuestTss=%#08x\n",
1603 GCPtrTss, cbMonitoredTss, pVM->selm.s.cbGuestTss));
1604
1605 /* Release the old range first. */
1606 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
1607 {
1608 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1609 AssertRC(rc);
1610 }
1611
1612 /* Register the write handler if the monitored size is non-zero (cbMonitoredTss != 0). */
1613 if (cbMonitoredTss != 0)
1614 {
1615 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
1616 0, selmR3GuestTSSWriteHandler,
1617 "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1618 if (RT_FAILURE(rc))
1619 {
1620 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1621 return rc;
1622 }
1623
1624 /* Update saved Guest TSS info. */
1625 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1626 pVM->selm.s.cbMonitoredGuestTss = cbMonitoredTss;
1627 pVM->selm.s.GCSelTss = SelTss;
1628 }
1629 else
1630 {
1631 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
1632 pVM->selm.s.cbMonitoredGuestTss = 0;
1633 pVM->selm.s.GCSelTss = 0;
1634 }
1635 }
1636
1637 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1638
1639 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1640 return VINF_SUCCESS;
1641}
1642
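/*
 * A minimal standalone sketch of the "how much of the TSS do we monitor"
 * decision made by SELMR3SyncTSS() above, using plain integers instead of the
 * VMM types.  The function name, the parameter names and the two offset
 * constants are illustrative assumptions, not VirtualBox APIs; the offsets
 * stand in for RT_UOFFSETOF(VBOXTSS, padding_ss0) and RT_OFFSETOF(VBOXTSS,
 * IntRedirBitmap), and the bounds test on offIoBitmap is simplified.
 */
static uint32_t selmSketchCalcMonitoredTssSize(unsigned uTssType, uint32_t cbTss,
                                               bool fCr4Vme, bool fTssReadOk,
                                               uint32_t offIoBitmap)
{
    const uint32_t offPaddingSs0  = 10;  /* assumed: link(4) + esp0(4) + ss0(2) */
    const uint32_t offIntRedirBmp = 104; /* assumed offset of the redirection bitmap member */

    /* Only 32-bit TSSes are interesting (types 9 and 11); 16-bit ones aren't monitored. */
    if (uTssType != 9 && uTssType != 11)
        return 0;

    /* Without CR4.VME, or with a bogus offIoBitmap, only SS0 and ESP0 matter. */
    if (   !fCr4Vme
        || (fTssReadOk && offIoBitmap > cbTss))
        return offPaddingSs0;

    /* Otherwise monitor everything up to the I/O bitmap; the 32-byte interrupt
       redirection bitmap sits immediately before it. */
    if (fTssReadOk)
        return offIoBitmap;

    /* Couldn't read the guest TSS: fall back to the fixed prefix. */
    return offIntRedirBmp;
}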
1643
1644/**
1645 * Compares the Guest GDT and LDT with the shadow tables.
1646 * This is a VBOX_STRICT only function.
1647 *
1648 * @returns VBox status code.
1649 * @param pVM The VM Handle.
1650 */
1651VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1652{
1653#ifdef VBOX_STRICT
1654 PVMCPU pVCpu = VMMGetCpu(pVM);
1655
1656 /*
1657 * Get GDTR and check for conflict.
1658 */
1659 VBOXGDTR GDTR;
1660 CPUMGetGuestGDTR(pVCpu, &GDTR);
1661 if (GDTR.cbGdt == 0)
1662 return VINF_SUCCESS;
1663
1664 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
1665 Log(("SELMR3DebugCheck: guest GDT size forced us to look for unused selectors.\n"));
1666
1667 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1668 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1669
1670 /*
1671 * Loop thru the GDT checking each entry.
1672 */
1673 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1674 PX86DESC pGDTE = pVM->selm.s.paGdtR3;
1675 PX86DESC pGDTEEnd = (PX86DESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1676 while (pGDTE < pGDTEEnd)
1677 {
1678 X86DESC GDTEGuest;
1679 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1680 if (RT_SUCCESS(rc))
1681 {
1682 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1683 {
1684 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1685 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1686 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1687 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1688 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1689 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1690 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1691 {
1692 unsigned iGDT = pGDTE - pVM->selm.s.paGdtR3;
1693 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1694 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1695 }
1696 }
1697 }
1698
1699 /* Advance to the next descriptor. */
1700 GCPtrGDTEGuest += sizeof(X86DESC);
1701 pGDTE++;
1702 }
1703
1704
1705 /*
1706 * LDT?
1707 */
1708 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
1709 if ((SelLdt & X86_SEL_MASK) == 0)
1710 return VINF_SUCCESS;
1711 if (SelLdt > GDTR.cbGdt)
1712 {
1713 Log(("SELMR3DebugCheck: LDT is out of bounds. SelLdt=%#x\n", SelLdt));
1714 return VERR_INTERNAL_ERROR;
1715 }
1716 X86DESC LDTDesc;
1717 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1718 if (RT_FAILURE(rc))
1719 {
1720 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1721 return rc;
1722 }
1723 RTGCPTR GCPtrLDTEGuest = X86DESC_BASE(LDTDesc);
1724 unsigned cbLdt = X86DESC_LIMIT(LDTDesc);
1725 if (LDTDesc.Gen.u1Granularity)
1726 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1727
1728 /*
1729 * Validate it.
1730 */
1731 if (!cbLdt)
1732 return VINF_SUCCESS;
1733 /** @todo check what Intel does about odd limits. */
1734 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1735 if ( LDTDesc.Gen.u1DescType
1736 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1737 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1738 {
1739 Log(("SELMR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1740 return VERR_INTERNAL_ERROR;
1741 }
1742
1743 /*
1744 * Loop thru the LDT checking each entry.
1745 */
1746 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1747 PX86DESC pLDTE = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1748 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pLDTE + cbLdt);
1749 while (pLDTE < pLDTEEnd)
1750 {
1751 X86DESC LDTEGuest;
1752 rc = PGMPhysSimpleReadGCPtr(pVCpu, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1753 if (RT_SUCCESS(rc))
1754 {
1755 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1756 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1757 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1758 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1759 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1760 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1761 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1762 {
1763 unsigned iLDT = pLDTE - (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
1764 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1765 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1766 }
1767 }
1768
1769 /* Advance to the next descriptor. */
1770 GCPtrLDTEGuest += sizeof(X86DESC);
1771 pLDTE++;
1772 }
1773
1774#else /* !VBOX_STRICT */
1775 NOREF(pVM);
1776#endif /* !VBOX_STRICT */
1777
1778 return VINF_SUCCESS;
1779}
1780
1781
1782/**
1783 * Validates the RawR0 TSS values against those in the Guest TSS.
1784 *
1785 * @returns true if they match.
1786 * @returns false, with assertions, on mismatch.
1787 * @param pVM VM Handle.
1788 */
1789VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
1790{
1791#ifdef VBOX_STRICT
1792 PVMCPU pVCpu = VMMGetCpu(pVM);
1793
1794 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1795 return true;
1796
1797 /*
1798 * Get TR and extract the basic info.
1799 */
1800 CPUMSELREGHID trHid;
1801 RTSEL SelTss = CPUMGetGuestTR(pVCpu, &trHid);
1802 RTGCPTR GCPtrTss = trHid.u64Base;
1803 uint32_t cbTss = trHid.u32Limit;
1804 Assert( (SelTss & X86_SEL_MASK)
1805 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1806 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
1807 if (SelTss & X86_SEL_MASK)
1808 {
1809 AssertReturn(!(SelTss & X86_SEL_LDT), false);
1810 AssertReturn(trHid.Attr.n.u1DescType == 0, false);
1811 AssertReturn( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY
1812 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY,
1813 false);
1814 if (!++cbTss)
1815 cbTss = UINT32_MAX;
1816 }
1817 else
1818 {
1819 AssertReturn( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
1820 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */),
1821 false);
1822 cbTss = 0; /* the reset case. */
1823 }
1824 AssertMsgReturn(pVM->selm.s.cbGuestTss == cbTss, ("%#x %#x\n", pVM->selm.s.cbGuestTss, cbTss), false);
1825 AssertMsgReturn(pVM->selm.s.fGuestTss32Bit == ( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1826 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY),
1827 ("%RTbool u4Type=%d\n", pVM->selm.s.fGuestTss32Bit, trHid.Attr.n.u4Type),
1828 false);
1829 AssertMsgReturn( pVM->selm.s.GCSelTss == SelTss
1830 || (!pVM->selm.s.GCSelTss && !(SelTss & X86_SEL_LDT)),
1831 ("%#x %#x\n", pVM->selm.s.GCSelTss, SelTss),
1832 false);
1833 AssertMsgReturn( pVM->selm.s.GCPtrGuestTss == GCPtrTss
1834 || (pVM->selm.s.GCPtrGuestTss == RTRCPTR_MAX && !GCPtrTss),
1835 ("%#RGv %#RGv\n", pVM->selm.s.GCPtrGuestTss, GCPtrTss),
1836 false);
1837
1838
1839 /*
1840 * Figure out the size of what we need to monitor.
1841 */
1842 bool fNoRing1Stack = true;
1843 /* We're not interested in any 16-bit TSSes. */
1844 uint32_t cbMonitoredTss = cbTss;
1845 if ( trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1846 && trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
1847 cbMonitoredTss = 0;
1848 if (cbMonitoredTss)
1849 {
1850 VBOXTSS Tss;
1851 uint32_t cr4 = CPUMGetGuestCR4(pVCpu);
1852 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, IntRedirBitmap));
1853 AssertReturn( rc == VINF_SUCCESS
1854 /* Happens early in XP boot during page table switching. */
1855 || ( (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1856 && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF)),
1857 false);
1858 if ( !(cr4 & X86_CR4_VME)
1859 || ( RT_SUCCESS(rc)
1860 && Tss.offIoBitmap < sizeof(VBOXTSS) /* too small */
1861 && Tss.offIoBitmap > cbTss)
1862 )
1863 cbMonitoredTss = RT_UOFFSETOF(VBOXTSS, padding_ss0);
1864 else if (RT_SUCCESS(rc))
1865 {
1866 cbMonitoredTss = Tss.offIoBitmap;
1867 AssertMsgReturn(pVM->selm.s.offGuestIoBitmap == Tss.offIoBitmap,
1868 ("%#x %#x\n", pVM->selm.s.offGuestIoBitmap, Tss.offIoBitmap),
1869 false);
1870
1871 /* check the bitmap */
1872 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap);
1873 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Tss.IntRedirBitmap,
1874 GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap));
1875 AssertRCReturn(rc, false);
1876 AssertMsgReturn(!memcmp(&Tss.IntRedirBitmap[0], &pVM->selm.s.Tss.IntRedirBitmap[0], sizeof(Tss.IntRedirBitmap)),
1877 ("offIoBitmap=%#x cbTss=%#x\n"
1878 " Guest: %.32Rhxs\n"
1879 "Shadow: %.32Rhxs\n",
1880 Tss.offIoBitmap, cbTss,
1881 &Tss.IntRedirBitmap[0],
1882 &pVM->selm.s.Tss.IntRedirBitmap[0]),
1883 false);
1884 }
1885 else
1886 cbMonitoredTss = RT_OFFSETOF(VBOXTSS, IntRedirBitmap);
1887
1888 /*
1889 * Check SS0 and ESP0.
1890 */
1891 if ( !pVM->selm.s.fSyncTSSRing0Stack
1892 && RT_SUCCESS(rc))
1893 {
1894 if ( Tss.esp0 != pVM->selm.s.Tss.esp1
1895 || Tss.ss0 != (pVM->selm.s.Tss.ss1 & ~1))
1896 {
1897 RTGCPHYS GCPhys;
1898 rc = PGMGstGetPage(pVCpu, GCPtrTss, NULL, &GCPhys); AssertRC(rc);
1899 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%RGv Phys=%RGp\n",
1900 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1,
1901 Tss.ss0, Tss.esp0, GCPtrTss, GCPhys));
1902 return false;
1903 }
1904 }
1905 AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
1906 }
1907 else
1908 {
1909 AssertMsgReturn(pVM->selm.s.Tss.ss1 == 0 && pVM->selm.s.Tss.esp1 == 0, ("%04x:%08x\n", pVM->selm.s.Tss.ss1, pVM->selm.s.Tss.esp1), false);
1910 AssertReturn(!pVM->selm.s.fSyncTSSRing0Stack, false);
1911 AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
1912 }
1913
1914
1915
1916 return true;
1917
1918#else /* !VBOX_STRICT */
1919 NOREF(pVM);
1920 return true;
1921#endif /* !VBOX_STRICT */
1922}
1923
1924
1925/**
1926 * Returns the flat address and limit of the LDT denoted by an LDT selector,
1927 * looked up in the guest GDT.
1928 * Fully validates the selector.
1929 *
1930 * @returns VBox status code.
1931 * @param pVM VM Handle.
1932 * @param SelLdt LDT selector.
1933 * @param ppvLdt Where to store the flat address of LDT.
1934 * @param pcbLimit Where to store LDT limit.
1935 */
1936VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1937{
1938 PVMCPU pVCpu = VMMGetCpu(pVM);
1939
1940 /* Get guest GDTR. */
1941 VBOXGDTR GDTR;
1942 CPUMGetGuestGDTR(pVCpu, &GDTR);
1943
1944 /* Check selector TI and GDT limit. */
1945 if ( (SelLdt & X86_SEL_LDT)
1946 || SelLdt > GDTR.cbGdt)
1947 return VERR_INVALID_SELECTOR;
1948
1949 /* Read descriptor from GC. */
1950 X86DESC Desc;
1951 int rc = PGMPhysSimpleReadGCPtr(pVCpu, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1952 if (RT_FAILURE(rc))
1953 {
1954 /* fatal */
1955 Log(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1956 return VERR_SELECTOR_NOT_PRESENT;
1957 }
1958
1959 /* Check if LDT descriptor is not present. */
1960 if (Desc.Gen.u1Present == 0)
1961 return VERR_SELECTOR_NOT_PRESENT;
1962
1963 /* Check LDT descriptor type. */
1964 if ( Desc.Gen.u1DescType == 1
1965 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1966 return VERR_INVALID_SELECTOR;
1967
1968 /* LDT descriptor is ok. */
1969 if (ppvLdt)
1970 {
1971 *ppvLdt = (RTGCPTR)X86DESC_BASE(Desc);
1972 *pcbLimit = X86DESC_LIMIT(Desc);
1973 }
1974 return VINF_SUCCESS;
1975}
1976
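/*
 * A minimal usage sketch for SELMGetLDTFromSel(); the wrapper function below
 * is a hypothetical example, but the call, the output parameters and the
 * status codes are the ones documented above.
 */
static int selmSketchLogLdtBase(PVM pVM, RTSEL SelLdt)
{
    RTGCPTR  GCPtrLdt = 0;
    unsigned cbLimit  = 0;
    int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLimit);
    if (RT_FAILURE(rc))
        return rc; /* VERR_INVALID_SELECTOR or VERR_SELECTOR_NOT_PRESENT. */
    Log(("LDT: Sel=%04x base=%RGv limit=%#x\n", SelLdt, GCPtrLdt, cbLimit));
    return VINF_SUCCESS;
}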
1977
1978/**
1979 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
1980 *
1981 * See SELMR3GetSelectorInfo for details.
1982 *
1983 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
1984 *
1985 * @param pVM VM handle.
1986 * @param pVCpu VMCPU handle.
1987 * @param Sel The selector to get info about.
1988 * @param pSelInfo Where to store the information.
1989 */
1990static int selmR3GetSelectorInfo64(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
1991{
1992 /*
1993 * Read it from the guest descriptor table.
1994 */
1995 X86DESC64 Desc;
1996 VBOXGDTR Gdtr;
1997 RTGCPTR GCPtrDesc;
1998 CPUMGetGuestGDTR(pVCpu, &Gdtr);
1999 if (!(Sel & X86_SEL_LDT))
2000 {
2001 /* GDT */
2002 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2003 return VERR_INVALID_SELECTOR;
2004 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
2005 }
2006 else
2007 {
2008 /*
2009 * LDT - must locate the LDT first.
2010 */
2011 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2012 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
2013 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2014 return VERR_INVALID_SELECTOR;
2015 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
2016 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2017 if (RT_FAILURE(rc))
2018 return rc;
2019
2020 /* validate the LDT descriptor. */
2021 if (Desc.Gen.u1Present == 0)
2022 return VERR_SELECTOR_NOT_PRESENT;
2023 if ( Desc.Gen.u1DescType == 1
2024 || Desc.Gen.u4Type != AMD64_SEL_TYPE_SYS_LDT)
2025 return VERR_INVALID_SELECTOR;
2026
2027 uint32_t cbLimit = X86DESC_LIMIT(Desc);
2028 if (Desc.Gen.u1Granularity)
2029 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2030 if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
2031 return VERR_INVALID_SELECTOR;
2032
2033 /* calc the descriptor location. */
2034 GCPtrDesc = X86DESC64_BASE(Desc);
2035 GCPtrDesc += (Sel & X86_SEL_MASK);
2036 }
2037
2038 /* read the descriptor. */
2039 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2040 if (RT_FAILURE(rc))
2041 {
2042 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(X86DESC));
2043 if (RT_FAILURE(rc))
2044 return rc;
2045 Desc.au64[1] = 0;
2046 }
2047
2048 /*
2049 * Extract the base and limit
2050 * (We ignore the present bit here, which is probably a bit silly...)
2051 */
2052 pSelInfo->Sel = Sel;
2053 pSelInfo->fFlags = DBGFSELINFO_FLAGS_LONG_MODE;
2054 pSelInfo->u.Raw64 = Desc;
2055 if (Desc.Gen.u1DescType)
2056 {
2057 /*
2058 * 64-bit code selectors are wide open; it's not possible to detect
2059 * 64-bit data or stack selectors without also dragging in assumptions
2060 * about the current CS (i.e. that we're executing in 64-bit mode). So,
2061 * the selinfo user unfortunately needs to deal with this in the context
2062 * where the info is used.
2063 */
2064 if ( Desc.Gen.u1Long
2065 && !Desc.Gen.u1DefBig
2066 && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
2067 {
2068 /* Note! We ignore the segment limit hacks that were added by AMD. */
2069 pSelInfo->GCPtrBase = 0;
2070 pSelInfo->cbLimit = ~(RTGCUINTPTR)0;
2071 }
2072 else
2073 {
2074 pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
2075 if (Desc.Gen.u1Granularity)
2076 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2077 pSelInfo->GCPtrBase = X86DESC_BASE(Desc);
2078 }
2079 pSelInfo->SelGate = 0;
2080 }
2081 else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_LDT
2082 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_AVAIL
2083 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY)
2084 {
2085 /* Note! LDT descriptors are weird in long mode; we ignore the footnote
2086 in the AMD manual here as a simplification. */
2087 pSelInfo->GCPtrBase = X86DESC64_BASE(Desc);
2088 pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
2089 if (Desc.Gen.u1Granularity)
2090 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2091 pSelInfo->SelGate = 0;
2092 }
2093 else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE
2094 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TRAP_GATE
2095 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_INT_GATE)
2096 {
2097 pSelInfo->cbLimit = 0; /* limits are meaningless for gates */
2098 pSelInfo->GCPtrBase = Desc.Gate.u16OffsetLow
2099 | ((uint32_t)Desc.Gate.u16OffsetHigh << 16)
2100 | ((uint64_t)Desc.Gate.u32OffsetTop << 32);
2101 pSelInfo->SelGate = Desc.Gate.u16Sel;
2102 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
2103 }
2104 else
2105 {
2106 pSelInfo->cbLimit = 0;
2107 pSelInfo->GCPtrBase = 0;
2108 pSelInfo->SelGate = 0;
2109 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
2110 }
2111 if (!Desc.Gen.u1Present)
2112 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
2113
2114 return VINF_SUCCESS;
2115}
2116
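/*
 * The 64-bit gate offset assembly used above, as a minimal sketch; the helper
 * name is illustrative.  Long-mode gate descriptors split the 64-bit target
 * address across three fields.
 */
static uint64_t selmSketchGateOffset64(uint16_t u16OffsetLow, uint16_t u16OffsetHigh,
                                       uint32_t u32OffsetTop)
{
    return (uint64_t)u16OffsetLow
         | (uint64_t)u16OffsetHigh << 16
         | (uint64_t)u32OffsetTop  << 32;
}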
2117
2118/**
2119 * Worker for selmR3GetSelectorInfo32 and SELMR3GetShadowSelectorInfo that
2120 * interprets a legacy descriptor table entry and fills in the selector info
2121 * structure from it.
2122 *
2123 * @param pSelInfo Where to store the selector info. Only the fFlags and
2124 * Sel members have been initialized.
2125 * @param pDesc The legacy descriptor to parse.
2126 */
2127DECLINLINE(void) selmR3SelInfoFromDesc32(PDBGFSELINFO pSelInfo, PCX86DESC pDesc)
2128{
2129 pSelInfo->u.Raw64.au64[1] = 0;
2130 pSelInfo->u.Raw = *pDesc;
2131 if ( pDesc->Gen.u1DescType
2132 || !(pDesc->Gen.u4Type & 4))
2133 {
2134 pSelInfo->cbLimit = X86DESC_LIMIT(*pDesc);
2135 if (pDesc->Gen.u1Granularity)
2136 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2137 pSelInfo->GCPtrBase = X86DESC_BASE(*pDesc);
2138 pSelInfo->SelGate = 0;
2139 }
2140 else if (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_UNDEFINED4)
2141 {
2142 pSelInfo->cbLimit = 0;
2143 if (pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_TASK_GATE)
2144 pSelInfo->GCPtrBase = 0;
2145 else
2146 pSelInfo->GCPtrBase = pDesc->Gate.u16OffsetLow
2147 | (uint32_t)pDesc->Gate.u16OffsetHigh << 16;
2148 pSelInfo->SelGate = pDesc->Gate.u16Sel;
2149 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
2150 }
2151 else
2152 {
2153 pSelInfo->cbLimit = 0;
2154 pSelInfo->GCPtrBase = 0;
2155 pSelInfo->SelGate = 0;
2156 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
2157 }
2158 if (!pDesc->Gen.u1Present)
2159 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
2160}
2161
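/*
 * A minimal sketch of the legacy base/limit math behind X86DESC_BASE and
 * X86DESC_LIMIT as used above; the helper names and parameters are
 * illustrative.  The 32-bit base is split over three descriptor fields, the
 * 20-bit limit over two, and with the granularity bit set the limit counts
 * 4 KiB pages, so it is shifted up by PAGE_SHIFT and the low bits filled in.
 */
static uint32_t selmSketchDescBase(uint16_t u16BaseLow, uint8_t u8BaseHigh1, uint8_t u8BaseHigh2)
{
    return (uint32_t)u16BaseLow
         | (uint32_t)u8BaseHigh1 << 16
         | (uint32_t)u8BaseHigh2 << 24;
}

static uint32_t selmSketchDescLimit(uint16_t u16LimitLow, uint8_t u4LimitHigh, bool fGranularity)
{
    uint32_t cbLimit = (uint32_t)u16LimitLow | (uint32_t)(u4LimitHigh & 0xf) << 16;
    if (fGranularity)
        cbLimit = (cbLimit << 12) | 0xfff; /* i.e. << PAGE_SHIFT | PAGE_OFFSET_MASK */
    return cbLimit;
}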
2162
2163/**
2164 * Gets information about a 32-bit selector, SELMR3GetSelectorInfo helper.
2165 *
2166 * See SELMR3GetSelectorInfo for details.
2167 *
2168 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
2169 *
2170 * @param pVM VM handle.
2171 * @param pVCpu VMCPU handle.
2172 * @param Sel The selector to get info about.
2173 * @param pSelInfo Where to store the information.
2174 */
2175static int selmR3GetSelectorInfo32(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
2176{
2177 /*
2178 * Read the descriptor entry
2179 */
2180 pSelInfo->fFlags = 0;
2181 X86DESC Desc;
2182 if ( !(Sel & X86_SEL_LDT)
2183 && ( pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
2184 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
2185 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
2186 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
2187 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
2188 )
2189 {
2190 /*
2191 * Hypervisor descriptor.
2192 */
2193 pSelInfo->fFlags = DBGFSELINFO_FLAGS_HYPER;
2194 if (CPUMIsGuestInProtectedMode(pVCpu))
2195 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_PROT_MODE;
2196 else
2197 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_REAL_MODE;
2198
2199 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
2200 }
2201 else if (CPUMIsGuestInProtectedMode(pVCpu))
2202 {
2203 /*
2204 * Read it from the guest descriptor table.
2205 */
2206 pSelInfo->fFlags = DBGFSELINFO_FLAGS_PROT_MODE;
2207
2208 VBOXGDTR Gdtr;
2209 RTGCPTR GCPtrDesc;
2210 CPUMGetGuestGDTR(pVCpu, &Gdtr);
2211 if (!(Sel & X86_SEL_LDT))
2212 {
2213 /* GDT */
2214 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2215 return VERR_INVALID_SELECTOR;
2216 GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
2217 }
2218 else
2219 {
2220 /*
2221 * LDT - must locate the LDT first...
2222 */
2223 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2224 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
2225 || (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
2226 return VERR_INVALID_SELECTOR;
2227 GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
2228 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2229 if (RT_FAILURE(rc))
2230 return rc;
2231
2232 /* validate the LDT descriptor. */
2233 if (Desc.Gen.u1Present == 0)
2234 return VERR_SELECTOR_NOT_PRESENT;
2235 if ( Desc.Gen.u1DescType == 1
2236 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2237 return VERR_INVALID_SELECTOR;
2238
2239 unsigned cbLimit = X86DESC_LIMIT(Desc);
2240 if (Desc.Gen.u1Granularity)
2241 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2242 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
2243 return VERR_INVALID_SELECTOR;
2244
2245 /* calc the descriptor location. */
2246 GCPtrDesc = X86DESC_BASE(Desc);
2247 GCPtrDesc += (Sel & X86_SEL_MASK);
2248 }
2249
2250 /* read the descriptor. */
2251 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
2252 if (RT_FAILURE(rc))
2253 return rc;
2254 }
2255 else
2256 {
2257 /*
2258 * We're in real mode.
2259 */
2260 pSelInfo->Sel = Sel;
2261 pSelInfo->GCPtrBase = Sel << 4;
2262 pSelInfo->cbLimit = 0xffff;
2263 pSelInfo->fFlags = DBGFSELINFO_FLAGS_REAL_MODE;
2264 pSelInfo->u.Raw64.au64[0] = 0;
2265 pSelInfo->u.Raw64.au64[1] = 0;
2266 pSelInfo->SelGate = 0;
2267 return VINF_SUCCESS;
2268 }
2269
2270 /*
2271 * Extract the base and limit or sel:offset for gates.
2272 */
2273 pSelInfo->Sel = Sel;
2274 selmR3SelInfoFromDesc32(pSelInfo, &Desc);
2275
2276 return VINF_SUCCESS;
2277}
2278
2279
2280/**
2281 * Gets information about a selector.
2282 *
2283 * Intended for the debugger mostly and will prefer the guest descriptor tables
2284 * over the shadow ones.
2285 *
2286 * @retval VINF_SUCCESS on success.
2287 * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
2288 * descriptor table.
2289 * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
2290 * is not returned if the selector itself isn't present, you have to
2291 * check that for yourself (see DBGFSELINFO::fFlags).
2292 * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
2293 * pagetable or page backing the selector table wasn't present.
2294 * @returns Other VBox status code on other errors.
2295 *
2296 * @param pVM VM handle.
2297 * @param pVCpu The virtual CPU handle.
2298 * @param Sel The selector to get info about.
2299 * @param pSelInfo Where to store the information.
2300 */
2301VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
2302{
2303 AssertPtr(pSelInfo);
2304 if (CPUMIsGuestInLongMode(pVCpu))
2305 return selmR3GetSelectorInfo64(pVM, pVCpu, Sel, pSelInfo);
2306 return selmR3GetSelectorInfo32(pVM, pVCpu, Sel, pSelInfo);
2307}
2308
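/*
 * A short usage sketch for SELMR3GetSelectorInfo(); the wrapper below is a
 * hypothetical example, but the status codes and the DBGFSELINFO fields it
 * inspects match the documentation above.
 */
static void selmSketchLogSelector(PVM pVM, PVMCPU pVCpu, RTSEL Sel)
{
    DBGFSELINFO SelInfo;
    int rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, &SelInfo);
    if (RT_FAILURE(rc))
    {
        Log(("Sel=%04x: rc=%Rrc\n", Sel, rc));
        return;
    }
    if (SelInfo.fFlags & DBGFSELINFO_FLAGS_GATE)
        Log(("Sel=%04x: gate -> %04x:%RGv\n", Sel, SelInfo.SelGate, SelInfo.GCPtrBase));
    else
        Log(("Sel=%04x: base=%RGv limit=%RGv%s\n", Sel, SelInfo.GCPtrBase, (RTGCPTR)SelInfo.cbLimit,
             SelInfo.fFlags & DBGFSELINFO_FLAGS_NOT_PRESENT ? " (not present)" : ""));
}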
2309
2310/**
2311 * Gets information about a selector from the shadow tables.
2312 *
2313 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but
2314 * requires that the caller ensures that the shadow tables are up to date.
2315 *
2316 * @retval VINF_SUCCESS on success.
2317 * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
2318 * descriptor table.
2319 * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
2320 * is not returned if the selector itself isn't present, you have to
2321 * check that for yourself (see DBGFSELINFO::fFlags).
2322 * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
2323 * pagetable or page backing the selector table wasn't present.
2324 * @returns Other VBox status code on other errors.
2325 *
2326 * @param pVM VM handle.
2327 * @param Sel The selector to get info about.
2328 * @param pSelInfo Where to store the information.
2329 *
2330 * @remarks Don't use this when in hardware assisted virtualization mode.
2331 */
2332VMMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo)
2333{
2334 Assert(pSelInfo);
2335
2336 /*
2337 * Read the descriptor entry
2338 */
2339 X86DESC Desc;
2340 if (!(Sel & X86_SEL_LDT))
2341 {
2342 /*
2343 * Global descriptor.
2344 */
2345 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
2346 pSelInfo->fFlags = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
2347 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
2348 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
2349 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
2350 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK)
2351 ? DBGFSELINFO_FLAGS_HYPER
2352 : 0;
2353 /** @todo check that the GDT offset is valid. */
2354 }
2355 else
2356 {
2357 /*
2358 * Local Descriptor.
2359 */
2360 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper);
2361 Desc = paLDT[Sel >> X86_SEL_SHIFT];
2362 /** @todo check if the LDT page is actually available. */
2363 /** @todo check that the LDT offset is valid. */
2364 pSelInfo->fFlags = 0;
2365 }
2366 if (CPUMIsGuestInProtectedMode(VMMGetCpu0(pVM)))
2367 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_PROT_MODE;
2368 else
2369 pSelInfo->fFlags |= DBGFSELINFO_FLAGS_REAL_MODE;
2370
2371 /*
2372 * Extract the base and limit or sel:offset for gates.
2373 */
2374 pSelInfo->Sel = Sel;
2375 selmR3SelInfoFromDesc32(pSelInfo, &Desc);
2376
2377 return VINF_SUCCESS;
2378}
2379
2380
2381/**
2382 * Formats a descriptor.
2383 *
2384 * @param Desc Descriptor to format.
2385 * @param Sel Selector number.
2386 * @param pszOutput Output buffer.
2387 * @param cchOutput Size of output buffer.
2388 */
2389static void selmR3FormatDescriptor(X86DESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
2390{
2391 /*
2392 * Make variable description string.
2393 */
2394 static struct
2395 {
2396 unsigned cch;
2397 const char *psz;
2398 } const aTypes[32] =
2399 {
2400#define STRENTRY(str) { sizeof(str) - 1, str }
2401 /* system */
2402 STRENTRY("Reserved0 "), /* 0x00 */
2403 STRENTRY("TSS16Avail "), /* 0x01 */
2404 STRENTRY("LDT "), /* 0x02 */
2405 STRENTRY("TSS16Busy "), /* 0x03 */
2406 STRENTRY("Call16 "), /* 0x04 */
2407 STRENTRY("Task "), /* 0x05 */
2408 STRENTRY("Int16 "), /* 0x06 */
2409 STRENTRY("Trap16 "), /* 0x07 */
2410 STRENTRY("Reserved8 "), /* 0x08 */
2411 STRENTRY("TSS32Avail "), /* 0x09 */
2412 STRENTRY("ReservedA "), /* 0x0a */
2413 STRENTRY("TSS32Busy "), /* 0x0b */
2414 STRENTRY("Call32 "), /* 0x0c */
2415 STRENTRY("ReservedD "), /* 0x0d */
2416 STRENTRY("Int32 "), /* 0x0e */
2417 STRENTRY("Trap32 "), /* 0x0f */
2418 /* non system */
2419 STRENTRY("DataRO "), /* 0x10 */
2420 STRENTRY("DataRO Accessed "), /* 0x11 */
2421 STRENTRY("DataRW "), /* 0x12 */
2422 STRENTRY("DataRW Accessed "), /* 0x13 */
2423 STRENTRY("DataDownRO "), /* 0x14 */
2424 STRENTRY("DataDownRO Accessed "), /* 0x15 */
2425 STRENTRY("DataDownRW "), /* 0x16 */
2426 STRENTRY("DataDownRW Accessed "), /* 0x17 */
2427 STRENTRY("CodeEO "), /* 0x18 */
2428 STRENTRY("CodeEO Accessed "), /* 0x19 */
2429 STRENTRY("CodeER "), /* 0x1a */
2430 STRENTRY("CodeER Accessed "), /* 0x1b */
2431 STRENTRY("CodeConfEO "), /* 0x1c */
2432 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2433 STRENTRY("CodeConfER "), /* 0x1e */
2434 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2435#undef STRENTRY
2436 };
2437#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2438 char szMsg[128];
2439 char *psz = &szMsg[0];
2440 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2441 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2442 psz += aTypes[i].cch;
2443
2444 if (Desc.Gen.u1Present)
2445 ADD_STR(psz, "Present ");
2446 else
2447 ADD_STR(psz, "Not-Present ");
2448 if (Desc.Gen.u1Granularity)
2449 ADD_STR(psz, "Page ");
2450 if (Desc.Gen.u1DefBig)
2451 ADD_STR(psz, "32-bit ");
2452 else
2453 ADD_STR(psz, "16-bit ");
2454#undef ADD_STR
2455 *psz = '\0';
2456
2457 /*
2458 * Limit and Base and format the output.
2459 */
2460 uint32_t u32Limit = X86DESC_LIMIT(Desc);
2461 if (Desc.Gen.u1Granularity)
2462 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2463 uint32_t u32Base = X86DESC_BASE(Desc);
2464
2465 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2466 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2467}
2468
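/*
 * For reference, a flat ring-0 32-bit code segment (base 0, limit 0xfffff,
 * G=1, type CodeER Accessed) at selector 0008 would be formatted by the
 * function above roughly as follows (an illustration, not captured output):
 *
 *   0008 - 0000ffff 00cf9b00 - base=00000000 limit=ffffffff dpl=0 CodeER Accessed Present Page 32-bit
 */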
2469
2470/**
2471 * Dumps a descriptor.
2472 *
2473 * @param Desc Descriptor to dump.
2474 * @param Sel Selector number.
2475 * @param pszMsg Message to prepend the log entry with.
2476 */
2477VMMR3DECL(void) SELMR3DumpDescriptor(X86DESC Desc, RTSEL Sel, const char *pszMsg)
2478{
2479 char szOutput[128];
2480 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2481 Log(("%s: %s\n", pszMsg, szOutput));
2482 NOREF(szOutput[0]);
2483}
2484
2485
2486/**
2487 * Display the shadow GDT.
2488 *
2489 * @param pVM VM Handle.
2490 * @param pHlp The info helpers.
2491 * @param pszArgs Arguments, ignored.
2492 */
2493static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2494{
2495 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%RRv):\n", MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3));
2496 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2497 {
2498 if (pVM->selm.s.paGdtR3[iGDT].Gen.u1Present)
2499 {
2500 char szOutput[128];
2501 selmR3FormatDescriptor(pVM->selm.s.paGdtR3[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2502 const char *psz = "";
2503 if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> X86_SEL_SHIFT))
2504 psz = " HyperCS";
2505 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> X86_SEL_SHIFT))
2506 psz = " HyperDS";
2507 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> X86_SEL_SHIFT))
2508 psz = " HyperCS64";
2509 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> X86_SEL_SHIFT))
2510 psz = " HyperTSS";
2511 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
2512 psz = " HyperTSSTrap08";
2513 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2514 }
2515 }
2516}
2517
2518
2519/**
2520 * Display the guest GDT.
2521 *
2522 * @param pVM VM Handle.
2523 * @param pHlp The info helpers.
2524 * @param pszArgs Arguments, ignored.
2525 */
2526static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2527{
2528 /** @todo SMP support! */
2529 PVMCPU pVCpu = &pVM->aCpus[0];
2530
2531 VBOXGDTR GDTR;
2532 CPUMGetGuestGDTR(pVCpu, &GDTR);
2533 RTGCPTR GCPtrGDT = GDTR.pGdt;
2534 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(X86DESC);
2535
2536 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%RGv limit=%x):\n", GCPtrGDT, GDTR.cbGdt);
2537 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, GCPtrGDT += sizeof(X86DESC))
2538 {
2539 X86DESC GDTE;
2540 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &GDTE, GCPtrGDT, sizeof(GDTE));
2541 if (RT_SUCCESS(rc))
2542 {
2543 if (GDTE.Gen.u1Present)
2544 {
2545 char szOutput[128];
2546 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2547 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2548 }
2549 }
2550 else if (rc == VERR_PAGE_NOT_PRESENT)
2551 {
2552 if ((GCPtrGDT & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
2553 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", iGDT << X86_SEL_SHIFT, GCPtrGDT);
2554 }
2555 else
2556 pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", iGDT << X86_SEL_SHIFT, rc, GCPtrGDT);
2557 }
2558}
2559
2560
2561/**
2562 * Display the shadow LDT.
2563 *
2564 * @param pVM VM Handle.
2565 * @param pHlp The info helpers.
2566 * @param pszArgs Arguments, ignored.
2567 */
2568static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2569{
2570 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2571 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper);
2572 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%RRv limit=%#x):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2573 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2574 {
2575 if (paLDT[iLDT].Gen.u1Present)
2576 {
2577 char szOutput[128];
2578 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2579 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2580 }
2581 }
2582}
2583
2584
2585/**
2586 * Display the guest LDT.
2587 *
2588 * @param pVM VM Handle.
2589 * @param pHlp The info helpers.
2590 * @param pszArgs Arguments, ignored.
2591 */
2592static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2593{
2594 /** @todo SMP support! */
2595 PVMCPU pVCpu = &pVM->aCpus[0];
2596
2597 RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
2598 if (!(SelLdt & X86_SEL_MASK))
2599 {
2600 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2601 return;
2602 }
2603
2604 RTGCPTR GCPtrLdt;
2605 unsigned cbLdt;
2606 int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLdt);
2607 if (RT_FAILURE(rc))
2608 {
2609 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Rrc\n", SelLdt, rc);
2610 return;
2611 }
2612
2613 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RGv limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
2614 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2615 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
2616 {
2617 X86DESC LdtE;
2618 rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
2619 if (RT_SUCCESS(rc))
2620 {
2621 if (LdtE.Gen.u1Present)
2622 {
2623 char szOutput[128];
2624 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2625 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2626 }
2627 }
2628 else if (rc == VERR_PAGE_NOT_PRESENT)
2629 {
2630 if ((GCPtrLdt & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
2631 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, GCPtrLdt);
2632 }
2633 else
2634 pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, GCPtrLdt);
2635 }
2636}
2637
2638
2639/**
2640 * Dumps the hypervisor GDT.
2641 *
2642 * @param pVM VM handle.
2643 */
2644VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
2645{
2646 DBGFR3Info(pVM, "gdt", NULL, NULL);
2647}
2648
2649
2650/**
2651 * Dumps the hypervisor LDT.
2652 *
2653 * @param pVM VM handle.
2654 */
2655VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
2656{
2657 DBGFR3Info(pVM, "ldt", NULL, NULL);
2658}
2659
2660
2661/**
2662 * Dumps the guest GDT.
2663 *
2664 * @param pVM VM handle.
2665 */
2666VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
2667{
2668 DBGFR3Info(pVM, "gdtguest", NULL, NULL);
2669}
2670
2671
2672/**
2673 * Dumps the guest LDT.
2674 *
2675 * @param pVM VM handle.
2676 */
2677VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
2678{
2679 DBGFR3Info(pVM, "ldtguest", NULL, NULL);
2680}
2681