VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp@47682

Last change on this file since 47682 was 45786, checked in by vboxsync, 12 years ago

Move HMRCA.asm into the switcher code so we don't need VMMRC.rc.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 39.4 KB
/* $Id: VMMSwitcher.cpp 45786 2013-04-26 22:35:59Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor, World Switcher(s).
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/sup.h>
#include "VMMInternal.h"
#include "VMMSwitcher.h"
#include <VBox/vmm/vm.h>
#include <VBox/dis.h>

#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <iprt/ctype.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
 */
static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
{
    NULL,                           /* invalid entry */
#ifdef VBOX_WITH_RAW_MODE
# ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
#  ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
#  else
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
#  endif
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# else  /* RT_ARCH_AMD64 */
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64To32Bit_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# endif /* RT_ARCH_AMD64 */
#else  /* !VBOX_WITH_RAW_MODE */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif /* !VBOX_WITH_RAW_MODE */
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};

/** Array of switcher definitions.
 * The type and index shall match!
 */
static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
{
    NULL,                           /* invalid entry */
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    &vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    &vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* 64-bit host or hybrid 32-bit kernel */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};


/**
 * VMMR3Init worker that initializes the switcher code (aka core code).
 *
 * This is per-VM core code which might need fixups and/or is put on linear
 * contiguous backing for ease of use.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
int vmmR3SwitcherInit(PVM pVM)
{
#if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    return VINF_SUCCESS;
#else

    /*
     * Calc the size.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
        {
            /* try more allocations - Solaris, Linux. */
            const unsigned cTries = 8234;
            struct VMMInitBadTry
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
            AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
            unsigned i = 0;
            do
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvCoreCodeR0   = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvCoreCodeR3   = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
                     && i < cTries - 1);

            /* cleanup */
            if (RT_FAILURE(rc))
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("Failed to allocate and map core code: rc=%Rrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
                        i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
                SUPR3ContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
            }
            RTMemTmpFree(paBadTries);
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
            if (pSwitcher)
                memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
                       pSwitcher->pvCode, pSwitcher->cbCode);
        }

        /*
         * Map the code into the GC address space.
         */
        RTGCPTR GCPtr;
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode,
                                cbCoreCode, "Core Code", &GCPtr);
        if (RT_SUCCESS(rc))
        {
            pVM->vmm.s.pvCoreCodeRC = GCPtr;
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("CoreCode: R3=%RHv R0=%RHv RC=%RRv Phys=%RHp cb=%#x\n",
                    pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM probably has selected a switcher already, but we
             * need to get the routine addresses, so we'll reselect it.
             * This may legally fail, so we're ignoring the rc.
             * Note! See HMIsEnabled hack in selector function.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* Bail out. */
        AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPR3ContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS,
                   N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
                   cbCoreCode);

    pVM->vmm.s.pvCoreCodeR3 = NULL;
    pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvCoreCodeRC = 0;
    return rc;
#endif
}

/**
 * Relocate the switchers, called by VMMR3Relocate.
 *
 * @param   pVM         Pointer to the VM.
 * @param   offDelta    The relocation delta.
 */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    /*
     * Relocate all the switchers.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   pVM->vmm.s.pvCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                   pVM->vmm.s.pvCoreCodeRC + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the RC address for the current switcher.
     */
    PVMMSWITCHERDEF pSwitcher = papSwitchers[pVM->vmm.s.enmSwitcher];
    if (pSwitcher)
    {
        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
    }
    else
        AssertRelease(HMIsEnabled(pVM));

#else
    NOREF(pVM);
#endif
    NOREF(offDelta);
}


#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))

/**
 * Generic switcher code relocator.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pSwitcher   The switcher definition.
 * @param   R0PtrCode   Pointer to the core code block for the switcher, ring-0 mapping.
 * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
 * @param   GCPtrCode   The guest context address corresponding to pu8CodeR3.
 * @param   u32IDCode   The identity mapped (ID) address corresponding to pu8CodeR3.
 * @param   SelCS       The hypervisor CS selector.
 * @param   SelDS       The hypervisor DS selector.
 * @param   SelTSS      The hypervisor TSS selector.
 * @param   GCPtrGDT    The GC address of the hypervisor GDT.
 * @param   SelCS64     The 64-bit mode hypervisor CS selector.
 */
static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher,
                                         RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
                                         RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
    union
    {
        const uint8_t  *pu8;
        const uint16_t *pu16;
        const uint32_t *pu32;
        const uint64_t *pu64;
        const void     *pv;
        uintptr_t       u;
    } u;
    u.pv = pSwitcher->pvFixups;
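    /* A note on the fixup record format, as inferred from the parsing below:
       the records form a byte stream of one FIX_* type byte followed by a
       32-bit source offset into the switcher code and, for most fixup types,
       one further 32-bit target offset or operand; FIX_THE_END terminates
       the stream. */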

    /*
     * Process fixups.
     */
    uint8_t u8;
    while ((u8 = *u.pu8++) != FIX_THE_END)
    {
        /*
         * Get the source (where to write the fixup).
         */
        uint32_t offSrc = *u.pu32++;
        Assert(offSrc < pSwitcher->cbCode);
        union
        {
            uint8_t  *pu8;
            uint16_t *pu16;
            uint32_t *pu32;
            uint64_t *pu64;
            uintptr_t u;
        } uSrc;
        uSrc.pu8 = pu8CodeR3 + offSrc;

        /* The fixup target and method depend on the type. */
        switch (u8)
        {
            /*
             * 32-bit relative, source in HC and target in GC.
             */
            case FIX_HC_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
                break;
            }
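            /* Worked example with hypothetical numbers: if GCPtrCode is
               0xa0400000, offTrg is 0x10 and the fixup field sits at host
               address uSrc.u, the dword written is 0xa0400010 - (uSrc.u + 4),
               i.e. a near-relative displacement measured from the end of the
               4-byte field, as the CPU expects for rel32 operands. The
               remaining NEAR_REL cases below differ only in which of the HC,
               GC and ID mappings the source and target live in. */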

            /*
             * 32-bit relative, source in HC and target in ID.
             */
            case FIX_HC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in HC.
             */
            case FIX_GC_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in ID.
             */
            case FIX_GC_2_ID_NEAR_REL:
            {
                AssertMsg(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode, ("%x - %x < %x\n", offSrc, pSwitcher->offGCCode, pSwitcher->cbGCCode));
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in HC.
             */
            case FIX_ID_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in GC.
             */
            case FIX_ID_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 16:32 far jump, target in GC.
             */
            case FIX_GC_FAR32:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
                *uSrc.pu16++ = SelCS;
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUM offset.
             */
            case FIX_GC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUMCPU offset.
             */
            case FIX_GC_CPUMCPU_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->aCpus[0].cpum));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->aCpus[0].cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given VM offset.
             */
            case FIX_GC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, pVM) + offVM);
                break;
            }

            /*
             * Make 32-bit HC pointer given CPUM offset.
             */
            case FIX_HC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
                break;
            }

            /*
             * Make 32-bit R0 pointer given VM offset.
             */
            case FIX_HC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
                break;
            }

            /*
             * Store the 32-bit CR3 for the intermediate memory context.
             */
            case FIX_INTER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
                break;
            }

            /*
             * Store Hypervisor CS (16-bit).
             */
            case FIX_HYPER_CS:
            {
                *uSrc.pu16 = SelCS;
                break;
            }

            /*
             * Store Hypervisor DS (16-bit).
             */
            case FIX_HYPER_DS:
            {
                *uSrc.pu16 = SelDS;
                break;
            }

            /*
             * Store Hypervisor TSS (16-bit).
             */
            case FIX_HYPER_TSS:
            {
                *uSrc.pu16 = SelTSS;
                break;
            }

            /*
             * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
             */
            case FIX_GC_TSS_GDTE_DW2:
            {
                RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
                *uSrc.pu32 = (uint32_t)GCPtr;
                break;
            }
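            /* Background note: the second dword of a GDT descriptor holds the
               type field (including the TSS busy bit) and the high base bits;
               handing its address to the switcher presumably lets the assembly
               code clear the busy bit before reloading TR. */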

            /*
             * Store the EFER OR mask for the 32->64 bit switcher.
             */
            case FIX_EFER_OR_MASK:
            {
                uint32_t u32OrMask = MSR_K6_EFER_LME | MSR_K6_EFER_SCE;
                /*
                 * We don't care if cpuid 0x80000001 isn't supported, as that
                 * implies long mode isn't supported either, so this switcher
                 * would never be used.
                 */
                if (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX)
                    u32OrMask |= MSR_K6_EFER_NXE;

                *uSrc.pu32 = u32OrMask;
                break;
            }
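            /* For reference: LME enables long mode, SCE enables SYSCALL/SYSRET,
               and NXE enables no-execute paging; only NXE is made conditional
               on the CPUID NX feature bit above. */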

            /*
             * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the cpu.
             */
            case FIX_NO_FXSAVE_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMSupportsFXSR(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }
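            /* Note: this and the two FIX_NO_*_JMP fixups below patch the same
               5-byte window: either a jmp rel32 (the 0xE9 opcode plus a disp32
               of target - (source + 5)) is written, or the original 5 bytes
               are copied back from the unpatched image so execution falls
               through. */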

            /*
             * Insert relative jump to specified target if SYSENTER isn't used by the host.
             */
            case FIX_NO_SYSENTER_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSCALL isn't used by the host.
             */
            case FIX_NO_SYSCALL_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysCall(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = R0PtrCode + offTrg;
                break;
            }

# if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
            /*
             * 64-bit HC Code Selector (no argument).
             */
            case FIX_HC_64BIT_CS:
            {
                Assert(offSrc < pSwitcher->cbCode);
#  if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
                *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
#  else
                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
#  endif
                break;
            }

            /*
             * 64-bit HC pointer to the CPUM instance data (no argument).
             */
            case FIX_HC_64BIT_CPUM:
            {
                Assert(offSrc < pSwitcher->cbCode);
                *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
                break;
            }
# endif
            /*
             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu64 = R0PtrCode + offTrg;
                break;
            }

# ifdef RT_ARCH_X86
            case FIX_GC_64_BIT_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu64 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }
# endif

            /*
             * 32-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = u32IDCode + offTrg;
                break;
            }

            /*
             * 64-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_64BIT:
            case FIX_HC_64BIT_NOCHECK:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(u8 == FIX_HC_64BIT_NOCHECK || offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu64 = u32IDCode + offTrg;
                break;
            }

            /*
             * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
             */
            case FIX_ID_FAR32_TO_64BIT_MODE:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32++ = u32IDCode + offTrg;
                *uSrc.pu16   = SelCS64;
                AssertRelease(SelCS64);
                break;
            }

# ifdef VBOX_WITH_NMI
            /*
             * 32-bit address to the APIC base.
             */
            case FIX_GC_APIC_BASE_32BIT:
            {
                *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
                break;
            }
# endif

            default:
                AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
                break;
        }
    }

# ifdef LOG_ENABLED
    /*
     * If Log2 is enabled, disassemble the switcher code.
     *
     * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
     */
    if (LogIs2Enabled())
    {
        RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
                    "   R0PtrCode  = %p\n"
                    "   pu8CodeR3  = %p\n"
                    "   GCPtrCode  = %RGv\n"
                    "   u32IDCode  = %08x\n"
                    "   pVMRC      = %RRv\n"
                    "   pCPUMRC    = %RRv\n"
                    "   pVMR3      = %p\n"
                    "   pCPUMR3    = %p\n"
                    "   GCPtrGDT   = %RGv\n"
                    "   InterCR3s  = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
                    "   HyperCR3s  = %08RHp (32-Bit, PAE & AMD64)\n"
                    "   SelCS      = %04x\n"
                    "   SelDS      = %04x\n"
                    "   SelCS64    = %04x\n"
                    "   SelTSS     = %04x\n",
                    pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
                    R0PtrCode,
                    pu8CodeR3,
                    GCPtrCode,
                    u32IDCode,
                    VM_RC_ADDR(pVM, pVM),
                    VM_RC_ADDR(pVM, &pVM->cpum),
                    pVM,
                    &pVM->cpum,
                    GCPtrGDT,
                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
                    PGMGetHyperCR3(VMMGetCpu(pVM)),
                    SelCS, SelDS, SelCS64, SelTSS);

        uint32_t offCode = 0;
        while (offCode < pSwitcher->cbCode)
        {
            /*
             * Figure out where this is.
             */
            const char *pszDesc = NULL;
            RTUINTPTR   uBase;
            uint32_t    cbCode;
            if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
            {
                pszDesc = "HCCode0";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode0;
                cbCode  = pSwitcher->cbHCCode0;
            }
            else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
            {
                pszDesc = "HCCode1";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode1;
                cbCode  = pSwitcher->cbHCCode1;
            }
            else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
            {
                pszDesc = "GCCode";
                uBase   = GCPtrCode;
                offCode = pSwitcher->offGCCode;
                cbCode  = pSwitcher->cbGCCode;
            }
            else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
            {
                pszDesc = "IDCode0";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode0;
                cbCode  = pSwitcher->cbIDCode0;
            }
            else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
            {
                pszDesc = "IDCode1";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode1;
                cbCode  = pSwitcher->cbIDCode1;
            }
            else
            {
                RTLogPrintf("  %04x: %02x '%c' (nowhere)\n",
                            offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                offCode++;
                continue;
            }

            /*
             * Disassemble it.
             */
            RTLogPrintf("  %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);

            while (cbCode > 0)
            {
                /* try label it */
                if (pSwitcher->offR0ToRawMode == offCode)
                    RTLogPrintf("  *R0ToRawMode:\n");
                if (pSwitcher->offRCToHost == offCode)
                    RTLogPrintf("  *RCToHost:\n");
                if (pSwitcher->offRCCallTrampoline == offCode)
                    RTLogPrintf("  *RCCallTrampoline:\n");
                if (pSwitcher->offRCToHostAsm == offCode)
                    RTLogPrintf("  *RCToHostAsm:\n");
                if (pSwitcher->offRCToHostAsmNoReturn == offCode)
                    RTLogPrintf("  *RCToHostAsmNoReturn:\n");

                /* disas */
                uint32_t    cbInstr = 0;
                DISCPUSTATE Cpu;
                char        szDisas[256];
                int rc = DISInstr(pu8CodeR3 + offCode, DISCPUMODE_32BIT, &Cpu, &cbInstr);
                if (RT_SUCCESS(rc))
                {
                    Cpu.uInstrAddr += uBase - (uintptr_t)pu8CodeR3;
                    DISFormatYasmEx(&Cpu, szDisas, sizeof(szDisas),
                                    DIS_FMT_FLAGS_ADDR_LEFT | DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_BYTES_SPACED
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH,
                                    NULL, NULL);
                }
                if (RT_SUCCESS(rc))
                    RTLogPrintf("  %04x: %s\n", offCode, szDisas);
                else
                {
                    RTLogPrintf("  %04x: %02x '%c' (rc=%Rrc)\n",
                                offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ', rc);
                    cbInstr = 1;
                }
                offCode += cbInstr;
                cbCode  -= RT_MIN(cbInstr, cbCode);
            }
        }
    }
# endif
}

/**
 * Wrapper around SELMGetHyperGDT() that avoids calling it when raw-mode context
 * is not initialized.
 *
 * @returns Raw-mode context GDT address. Null pointer if not applicable.
 * @param   pVM     The cross context VM structure.
 */
static RTRCPTR vmmR3SwitcherGetHyperGDT(PVM pVM)
{
    if (HMIsRawModeCtxNeeded(pVM))
        return SELMGetHyperGDT(pVM);
# if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    AssertFailed(); /* This path is only applicable to some 32-bit hosts. */
# endif
    return NIL_RTRCPTR;
}

/**
 * Relocator for the 32-Bit to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to AMD64 world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the PAE to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to AMD64 world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the AMD64 to 32-bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64To32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Relocator for the AMD64 to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}

/**
 * Selects the switcher to be used for switching to raw-mode context.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   enmSwitcher     The new switcher.
 * @remark  This function may be called before the VMM is initialized.
 */
VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
{
    /*
     * Validate input.
     */
    if (    enmSwitcher < VMMSWITCHER_INVALID
        ||  enmSwitcher >= VMMSWITCHER_MAX)
    {
        AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Override it if HM is active.
     */
    if (HMIsEnabled(pVM))
        pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;

    /*
     * Select the new switcher.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    if (pSwitcher)
    {
        Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
        pVM->vmm.s.enmSwitcher = enmSwitcher;

        RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
        pVM->vmm.s.pfnR0ToRawMode = pbCodeR0 + pSwitcher->offR0ToRawMode;

        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
        return VINF_SUCCESS;
    }

    return VERR_NOT_IMPLEMENTED;
}
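
/* Usage sketch: vmmR3SwitcherInit above calls this with the currently
   configured switcher to (re)fetch the routine addresses:
       VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
   A failure there is legal and is deliberately ignored. */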

#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */


/**
 * Gets the switcher to be used for switching to GC.
 *
 * @returns Host-to-guest ring-0 switcher entry point.
 * @param   pVM             Pointer to the VM.
 * @param   enmSwitcher     The switcher to get.
 */
VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(   enmSwitcher == VMMSWITCHER_32_TO_AMD64
                    || enmSwitcher == VMMSWITCHER_PAE_TO_AMD64,
                    ("%d\n", enmSwitcher),
                    NIL_RTR0PTR);
    AssertReturn(HMIsEnabled(pVM), NIL_RTR0PTR);

    /*
     * Select the new switcher.
     */
    const PVMMSWITCHERDEF *papSwitchers = g_apHmSwitchers;
    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    if (pSwitcher)
    {
        /** @todo fix the pvCoreCodeR0 type */
        RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher];
        return pbCodeR0 + pSwitcher->offR0ToRawMode;
    }
    return NIL_RTR0PTR;
}
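
/* Usage sketch, limited to the two switchers permitted by the assertion above:
       RTR0PTR pfnSwitcher = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
       AssertReturn(pfnSwitcher != NIL_RTR0PTR, VERR_NOT_IMPLEMENTED);
   This is how a 32-bit host with HM enabled obtains the ring-0 entry point,
   presumably for running 64-bit guests via the 32->64 world switch. */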