VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp@62478

Last change on this file since 62478 was 62478, checked in by vboxsync, 8 years ago: (C) 2016

/* $Id: VMMSwitcher.cpp 62478 2016-07-22 18:29:06Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor, World Switcher(s).
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/sup.h>
#include "VMMInternal.h"
#include "VMMSwitcher.h"
#include <VBox/vmm/vm.h>
#include <VBox/dis.h>

#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <iprt/ctype.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
 */
static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifdef VBOX_WITH_RAW_MODE
# ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# else  /* RT_ARCH_AMD64 */
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64To32Bit_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# endif /* RT_ARCH_AMD64 */
#else  /* !VBOX_WITH_RAW_MODE */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif /* !VBOX_WITH_RAW_MODE */
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};

/** Array of switcher definitions.
 * The type and index shall match!
 */
static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#if HC_ARCH_BITS == 32
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    &vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    &vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* HC_ARCH_BITS != 32 */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif /* HC_ARCH_BITS != 32 */
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};
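
/*
 * Illustrative sketch: both tables are indexed directly by the VMMSWITCHER
 * enum value, so a lookup is simply
 *     PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
 * vmmR3SwitcherInit() release-asserts that pSwitcher->enmType matches the
 * index, which is what "The type and index shall match!" refers to.
 */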


# ifdef VBOX_WITH_64ON32_IDT
/**
 * Initializes the 64-bit IDT for 64-bit guest on 32-bit host switchers.
 *
 * This is only used as a debugging aid when we cannot find out why something
 * goes haywire in the intermediate context.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pSwitcher   The switcher descriptor.
 * @param   pbDst       Where the switcher code was just copied.
 * @param   HCPhysDst   The host physical address corresponding to @a pbDst.
 */
static void vmmR3Switcher32On64IdtInit(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
{
    AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode);
    AssertRelease(pSwitcher->cbCode < _64K);
    RTSEL uCs64 = SELMGetHyperCS64(pVM);

    PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
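    /*
     * Assumed template layout, inferred from the asserts below: each entry
     * arrives with the handler's code offset (relative to the switcher start)
     * in its first quadword and zero in the second; the loop rewrites every
     * entry into a proper 64-bit interrupt gate for the copied code.
     */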
    for (uint32_t i = 0; i < 256; i++)
    {
        AssertRelease(((uint64_t *)&paIdt[i])[0] < pSwitcher->cbCode);
        AssertRelease(((uint64_t *)&paIdt[i])[1] == 0);
        uint64_t uHandler = HCPhysDst + paIdt[i].u16OffsetLow;
        paIdt[i].u16OffsetLow  = (uint16_t)uHandler;
        paIdt[i].u16Sel        = uCs64;
        paIdt[i].u3IST         = 0;
        paIdt[i].u5Reserved    = 0;
        paIdt[i].u4Type        = AMD64_SEL_TYPE_SYS_INT_GATE;
        paIdt[i].u1DescType    = 0 /* system */;
        paIdt[i].u2Dpl         = 3;
        paIdt[i].u1Present     = 1;
        paIdt[i].u16OffsetHigh = (uint16_t)(uHandler >> 16);
        paIdt[i].u32Reserved   = (uint32_t)(uHandler >> 32);
    }

    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
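        /* The limit of 16*256 + iCpu covers all 256 16-byte gates and appears
           to tag the IDTR with the VCPU id, presumably as a debugging aid
           (assumption; the code gives no rationale). */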
        uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
        CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
    }
}


/**
 * Relocates the 64-bit IDT for 64-bit guest on 32-bit host switchers.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pSwitcher   The switcher descriptor.
 * @param   pbDst       Where the switcher code was just copied.
 * @param   HCPhysDst   The host physical address corresponding to @a pbDst.
 */
static void vmmR3Switcher32On64IdtRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
{
    AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode && pSwitcher->cbCode < _64K);

    /* The intermediate context doesn't move, but the CS may. */
    RTSEL uCs64 = SELMGetHyperCS64(pVM);
    PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
    for (uint32_t i = 0; i < 256; i++)
        paIdt[i].u16Sel = uCs64;

    /* Just in case... */
    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
        uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
        CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
    }
}
# endif /* VBOX_WITH_64ON32_IDT */


/**
 * VMMR3Init worker that initializes the switcher code (aka core code).
 *
 * This is per-VM core code which might need fixups and which, for ease of
 * use, is placed on linearly contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int vmmR3SwitcherInit(PVM pVM)
{
#if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64)
    return VINF_SUCCESS;
#else

    /*
     * Calc the size.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
        {
            /* Try more allocations, keeping the conflicting blocks allocated so
               fresh addresses are returned, and free them all afterwards (an
               issue seen on Solaris and Linux). */
            const unsigned cTries = 8234;
            struct VMMInitBadTry
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
            AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
            unsigned i = 0;
            do
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb     = pVM->vmm.s.cbCoreCode; /* so the cleanup below frees the right page count */
                i++;
                pVM->vmm.s.pvCoreCodeR0   = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvCoreCodeR3   = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
                     && i < cTries - 1);

            /* cleanup */
            if (RT_FAILURE(rc))
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("VMM: Failed to allocate and map core code: rc=%Rrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("VMM: Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
                        i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
                SUPR3ContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
            }
            RTMemTmpFree(paBadTries);
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
            if (pSwitcher)
            {
                uint8_t *pbDst = (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher];
                memcpy(pbDst, pSwitcher->pvCode, pSwitcher->cbCode);
# ifdef VBOX_WITH_64ON32_IDT
                if (   pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
                    || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
                    vmmR3Switcher32On64IdtInit(pVM, pSwitcher, pbDst,
                                               pVM->vmm.s.HCPhysCoreCode + pVM->vmm.s.aoffSwitchers[iSwitcher]);
# endif
            }
        }

        /*
         * Map the code into the GC address space.
         */
        RTGCPTR GCPtr;
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode,
                                cbCoreCode, "Core Code", &GCPtr);
        if (RT_SUCCESS(rc))
        {
            pVM->vmm.s.pvCoreCodeRC = GCPtr;
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("VMM: CoreCode: R3=%RHv R0=%RHv RC=%RRv Phys=%RHp cb=%#x\n",
                    pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM probably has selected a switcher already but we need
             * to get the routine addresses, so we'll reselect it.
             * This may legally fail, so we're ignoring the rc.
             * Note! See HMIsEnabled hack in selector function.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* shit */
        AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPR3ContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS,
                   N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
                   cbCoreCode);

    pVM->vmm.s.pvCoreCodeR3 = NULL;
    pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvCoreCodeRC = 0;
    return rc;
#endif
}

/**
 * Relocate the switchers, called by VMMR3Relocate.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    The relocation delta.
 */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64)
    /*
     * Relocate all the switchers.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   pVM->vmm.s.pvCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                   pVM->vmm.s.pvCoreCodeRC + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
# ifdef VBOX_WITH_64ON32_IDT
            if (   pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
                || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
                vmmR3Switcher32On64IdtRelocate(pVM, pSwitcher,
                                               (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                               pVM->vmm.s.HCPhysCoreCode + off);
# endif
        }
    }

    /*
     * Recalc the RC address for the current switcher.
     */
    PVMMSWITCHERDEF pSwitcher = papSwitchers[pVM->vmm.s.enmSwitcher];
    if (pSwitcher)
    {
        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
    }
    else
        AssertRelease(HMIsEnabled(pVM));

#else
    NOREF(pVM);
#endif
    NOREF(offDelta);
}


#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64)

/**
 * Generic switcher code relocator.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pSwitcher   The switcher definition.
 * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
 * @param   R0PtrCode   Pointer to the core code block for the switcher, ring-0 mapping.
 * @param   GCPtrCode   The guest context address corresponding to @a pu8CodeR3.
 * @param   u32IDCode   The identity mapped (ID) address corresponding to @a pu8CodeR3.
 * @param   SelCS       The hypervisor CS selector.
 * @param   SelDS       The hypervisor DS selector.
 * @param   SelTSS      The hypervisor TSS selector.
 * @param   GCPtrGDT    The GC address of the hypervisor GDT.
 * @param   SelCS64     The 64-bit mode hypervisor CS selector.
 */
static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher,
                                         RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
                                         RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
    union
    {
        const uint8_t  *pu8;
        const uint16_t *pu16;
        const uint32_t *pu32;
        const uint64_t *pu64;
        const void     *pv;
        uintptr_t       u;
    } u;
    u.pv = pSwitcher->pvFixups;

    /*
     * Process fixups.
     */
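    /*
     * Sketch of the fixup stream consumed below, inferred from the parsing
     * code itself rather than from a separate format specification:
     *
     *     [u8 type][u32 offSrc]([u32 offTrg/operand]) ... [FIX_THE_END]
     *
     * Every record starts with a type byte and a 32-bit source offset; most
     * types then read one further 32-bit target offset or operand.
     */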
    uint8_t u8;
    while ((u8 = *u.pu8++) != FIX_THE_END)
    {
        /*
         * Get the source (where to write the fixup).
         */
        uint32_t offSrc = *u.pu32++;
        Assert(offSrc < pSwitcher->cbCode);
        union
        {
            uint8_t  *pu8;
            uint16_t *pu16;
            uint32_t *pu32;
            uint64_t *pu64;
            uintptr_t u;
        } uSrc;
        uSrc.pu8 = pu8CodeR3 + offSrc;

        /* The fixup target and method depend on the type. */
        switch (u8)
        {
            /*
             * 32-bit relative, source in HC and target in GC.
             */
            case FIX_HC_2_GC_NEAR_REL:
            {
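                /* Here and in the other *_NEAR_REL cases the displacement is
                   target - (source + 4), since x86 rel32 operands are encoded
                   relative to the end of the 4-byte displacement field. */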
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
                break;
            }

            /*
             * 32-bit relative, source in HC and target in ID.
             */
            case FIX_HC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in HC.
             */
            case FIX_GC_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in ID.
             */
            case FIX_GC_2_ID_NEAR_REL:
            {
                AssertMsg(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode, ("%x - %x < %x\n", offSrc, pSwitcher->offGCCode, pSwitcher->cbGCCode));
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in HC.
             */
            case FIX_ID_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in GC.
             */
            case FIX_ID_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 16:32 far jump, target in GC.
             */
            case FIX_GC_FAR32:
            {
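                /* A 16:32 far pointer is a 32-bit offset followed by a 16-bit
                   selector, hence the two consecutive stores below. */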
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
                *uSrc.pu16++ = SelCS;
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUM offset.
             */
            case FIX_GC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUMCPU offset.
             */
            case FIX_GC_CPUMCPU_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->aCpus[0].cpum));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->aCpus[0].cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given VM offset.
             */
            case FIX_GC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, pVM) + offVM);
                break;
            }

            /*
             * Make 32-bit HC pointer given CPUM offset.
             */
            case FIX_HC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
                break;
            }

            /*
             * Make 32-bit R0 pointer given VM offset.
             */
            case FIX_HC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
                break;
            }

            /*
             * Store Hypervisor CS (16-bit).
             */
            case FIX_HYPER_CS:
            {
                *uSrc.pu16 = SelCS;
                break;
            }

            /*
             * Store Hypervisor DS (16-bit).
             */
            case FIX_HYPER_DS:
            {
                *uSrc.pu16 = SelDS;
                break;
            }

            /*
             * Store Hypervisor TSS (16-bit).
             */
            case FIX_HYPER_TSS:
            {
                *uSrc.pu16 = SelTSS;
                break;
            }

            /*
             * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
             */
            case FIX_GC_TSS_GDTE_DW2:
            {
                RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
                *uSrc.pu32 = (uint32_t)GCPtr;
                break;
            }

            /*
             * Store the EFER OR mask for the 32->64 bit switcher.
             */
            case FIX_EFER_OR_MASK:
            {
                uint32_t u32OrMask = MSR_K6_EFER_LME | MSR_K6_EFER_SCE;
                /*
                 * We don't care if cpuid 0x80000001 isn't supported as that implies
                 * long mode isn't supported either, so this switcher would never be used.
                 */
                if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
                    u32OrMask |= MSR_K6_EFER_NXE;

                *uSrc.pu32 = u32OrMask;
                break;
            }

#if 0 /* Reusable for XSAVE. */
            /*
             * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the cpu.
             */
            case FIX_NO_FXSAVE_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMSupportsXSave(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }
#endif

            /*
             * Insert relative jump to specified target if SYSENTER isn't used by the host.
             */
            case FIX_NO_SYSENTER_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSCALL isn't used by the host.
             */
            case FIX_NO_SYSCALL_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysCall(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = R0PtrCode + offTrg;
                break;
            }

# if defined(RT_ARCH_AMD64)
            /*
             * 64-bit HC Code Selector (no argument).
             */
            case FIX_HC_64BIT_CS:
            {
                Assert(offSrc < pSwitcher->cbCode);
                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
                break;
            }

            /*
             * 64-bit HC pointer to the CPUM instance data (no argument).
             */
            case FIX_HC_64BIT_CPUM:
            {
                Assert(offSrc < pSwitcher->cbCode);
                *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
                break;
            }
# endif
            /*
             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu64 = R0PtrCode + offTrg;
                break;
            }

# ifdef RT_ARCH_X86
            case FIX_GC_64_BIT_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu64 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }
# endif

            /*
             * 32-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = u32IDCode + offTrg;
                break;
            }

            /*
             * 64-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_64BIT:
            case FIX_HC_64BIT_NOCHECK:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(u8 == FIX_HC_64BIT_NOCHECK || offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu64 = u32IDCode + offTrg;
                break;
            }

            /*
             * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
             */
            case FIX_ID_FAR32_TO_64BIT_MODE:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32++ = u32IDCode + offTrg;
                *uSrc.pu16 = SelCS64;
                AssertRelease(SelCS64);
                break;
            }

# ifdef VBOX_WITH_NMI
            /*
             * 32-bit address to the APIC base.
             */
            case FIX_GC_APIC_BASE_32BIT:
            {
                *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
                break;
            }
# endif

            default:
                AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
                break;
        }
    }

# ifdef LOG_ENABLED
    /*
     * If Log2 is enabled disassemble the switcher code.
     *
     * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
     */
    if (LogIs2Enabled())
    {
        RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
                    "   R0PtrCode   = %p\n"
                    "   pu8CodeR3   = %p\n"
                    "   GCPtrCode   = %RGv\n"
                    "   u32IDCode   = %08x\n"
                    "   pVMRC       = %RRv\n"
                    "   pCPUMRC     = %RRv\n"
                    "   pVMR3       = %p\n"
                    "   pCPUMR3     = %p\n"
                    "   GCPtrGDT    = %RGv\n"
                    "   InterCR3s   = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
                    "   HyperCR3s   = %08RHp (32-Bit, PAE & AMD64)\n"
                    "   SelCS       = %04x\n"
                    "   SelDS       = %04x\n"
                    "   SelCS64     = %04x\n"
                    "   SelTSS      = %04x\n",
                    pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
                    R0PtrCode,
                    pu8CodeR3,
                    GCPtrCode,
                    u32IDCode,
                    VM_RC_ADDR(pVM, pVM),
                    VM_RC_ADDR(pVM, &pVM->cpum),
                    pVM,
                    &pVM->cpum,
                    GCPtrGDT,
                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
                    PGMGetHyperCR3(VMMGetCpu(pVM)),
                    SelCS, SelDS, SelCS64, SelTSS);

        uint32_t offCode = 0;
        while (offCode < pSwitcher->cbCode)
        {
            /*
             * Figure out where this is.
             */
            const char *pszDesc = NULL;
            RTUINTPTR   uBase;
            uint32_t    cbCode;
            if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
            {
                pszDesc = "HCCode0";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode0;
                cbCode  = pSwitcher->cbHCCode0;
            }
            else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
            {
                pszDesc = "HCCode1";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode1;
                cbCode  = pSwitcher->cbHCCode1;
            }
            else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
            {
                pszDesc = "GCCode";
                uBase   = GCPtrCode;
                offCode = pSwitcher->offGCCode;
                cbCode  = pSwitcher->cbGCCode;
            }
            else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
            {
                pszDesc = "IDCode0";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode0;
                cbCode  = pSwitcher->cbIDCode0;
            }
            else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
            {
                pszDesc = "IDCode1";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode1;
                cbCode  = pSwitcher->cbIDCode1;
            }
            else
            {
                RTLogPrintf("   %04x: %02x '%c' (nowhere)\n",
                            offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                offCode++;
                continue;
            }

            /*
             * Disassemble it.
             */
            RTLogPrintf("   %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);

            while (cbCode > 0)
            {
                /* try label it */
                if (pSwitcher->offR0ToRawMode == offCode)
                    RTLogPrintf("   *R0ToRawMode:\n");
                if (pSwitcher->offRCToHost == offCode)
                    RTLogPrintf("   *RCToHost:\n");
                if (pSwitcher->offRCCallTrampoline == offCode)
                    RTLogPrintf("   *RCCallTrampoline:\n");
                if (pSwitcher->offRCToHostAsm == offCode)
                    RTLogPrintf("   *RCToHostAsm:\n");
                if (pSwitcher->offRCToHostAsmNoReturn == offCode)
                    RTLogPrintf("   *RCToHostAsmNoReturn:\n");

                /* disas */
                uint32_t    cbInstr = 0;
                DISCPUSTATE Cpu;
                char        szDisas[256];
                int rc = DISInstr(pu8CodeR3 + offCode, DISCPUMODE_32BIT, &Cpu, &cbInstr);
                if (RT_SUCCESS(rc))
                {
                    Cpu.uInstrAddr += uBase - (uintptr_t)pu8CodeR3;
                    DISFormatYasmEx(&Cpu, szDisas, sizeof(szDisas),
                                    DIS_FMT_FLAGS_ADDR_LEFT | DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_BYTES_SPACED
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH,
                                    NULL, NULL);
                }
                if (RT_SUCCESS(rc))
                    RTLogPrintf("   %04x: %s\n", offCode, szDisas);
                else
                {
                    RTLogPrintf("   %04x: %02x '%c' (rc=%Rrc)\n",
                                offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ', rc);
                    cbInstr = 1;
                }
                offCode += cbInstr;
                cbCode -= RT_MIN(cbInstr, cbCode);
            }
        }
    }
# endif
}

/**
 * Wrapper around SELMGetHyperGDT() that avoids calling it when raw-mode context
 * is not initialized.
 *
 * @returns Raw-mode context GDT address. Null pointer if not applicable.
 * @param   pVM     The cross context VM structure.
 */
static RTRCPTR vmmR3SwitcherGetHyperGDT(PVM pVM)
{
    if (HMIsRawModeCtxNeeded(pVM))
        return SELMGetHyperGDT(pVM);
# if HC_ARCH_BITS != 32
    AssertFailed(); /* This path is only applicable to some 32-bit hosts. */
# endif
    return NIL_RTRCPTR;
}

/**
 * Relocator for the 32-Bit to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to AMD64 world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the PAE to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}

/**
 * Relocator for the PAE to AMD64 world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the AMD64 to 32-bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64To32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the AMD64 to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Selects the switcher to be used for switching to raw-mode context.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   enmSwitcher     The new switcher.
 * @remark  This function may be called before the VMM is initialized.
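 *
 * @par     Example
 *          An illustrative call site (sketch only; the switcher value chosen
 *          here is arbitrary):
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 *      if (RT_FAILURE(rc))
 *          LogRel(("VMMR3SelectSwitcher failed: %Rrc\n", rc));
 * @endcode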
 */
VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
{
    /*
     * Validate input.
     */
    if (    enmSwitcher < VMMSWITCHER_INVALID
        ||  enmSwitcher >= VMMSWITCHER_MAX)
    {
        AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Override it if HM is active.
     */
    if (HMIsEnabled(pVM))
        pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;

    /*
     * Select the new switcher.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    if (pSwitcher)
    {
        Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
        pVM->vmm.s.enmSwitcher = enmSwitcher;

        RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
        pVM->vmm.s.pfnR0ToRawMode = pbCodeR0 + pSwitcher->offR0ToRawMode;

        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
        return VINF_SUCCESS;
    }

    return VERR_NOT_IMPLEMENTED;
}

#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64) */


/**
 * Gets the switcher to be used for switching to GC.
 *
 * @returns Host-to-guest ring-0 switcher entry point.
 * @param   pVM             The cross context VM structure.
 * @param   enmSwitcher     The new switcher.
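 *
 * @par     Example
 *          A usage sketch (illustrative only, not an actual caller):
 * @code
 *      RTR0PTR pfnHostToGuest = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
 *      if (pfnHostToGuest == NIL_RTR0PTR)
 *          return VERR_NOT_SUPPORTED;
 * @endcode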
 */
VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(   enmSwitcher == VMMSWITCHER_32_TO_AMD64
                    || enmSwitcher == VMMSWITCHER_PAE_TO_AMD64,
                    ("%d\n", enmSwitcher),
                    NIL_RTR0PTR);
    AssertReturn(HMIsEnabled(pVM), NIL_RTR0PTR);

    /*
     * Select the new switcher.
     */
    const PVMMSWITCHERDEF *papSwitchers = g_apHmSwitchers;
    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    if (pSwitcher)
    {
        /** @todo fix the pvCoreCodeR0 type */
        RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher];
        return pbCodeR0 + pSwitcher->offR0ToRawMode;
    }
    return NIL_RTR0PTR;
}