VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher.cpp@ 13813

Last change on this file since 13813 was 13813, checked in by vboxsync, 16 years ago

#1865: Final VMM cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.2 KB
Line 
1/* $Id: VMMSwitcher.cpp 13813 2008-11-04 21:55:34Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor, World Switcher(s).
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/pgm.h>
28#include <VBox/selm.h>
29#include <VBox/mm.h>
30#include <VBox/sup.h>
31#include "VMMInternal.h"
32#include "VMMSwitcher/VMMSwitcher.h"
33#include <VBox/vm.h>
34#include <VBox/dis.h>
35
36#include <VBox/err.h>
37#include <VBox/param.h>
38#include <iprt/assert.h>
39#include <iprt/alloc.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <iprt/ctype.h>
43
44
45/*******************************************************************************
46* Global Variables *
47*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match, i.e. the entry at index i must be the
 * switcher whose VMMSWITCHERDEF::enmType equals the VMMSWITCHER value i
 * (vmmR3SwitcherInit asserts this).  NULL entries are switchers that are
 * not implemented (or not applicable) for the host architecture.
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    /* 32-bit host: 32-bit and PAE guest-paging switchers are available;
       the AMD64 target variants are not implemented. */
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL,    //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL,    //&vmmR3SwitcherPAEToAMD64_Def,
# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    /* Hybrid 32-bit kernel (e.g. darwin): 64-bit capable CPU with a
       32-bit kernel, so the AMD64-to-PAE switcher is needed. */
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL,    //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL     //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* RT_ARCH_AMD64 */
    /* 64-bit host: only the AMD64-to-PAE switcher is implemented. */
    NULL,    //&vmmR3Switcher32BitTo32Bit_Def,
    NULL,    //&vmmR3Switcher32BitToPAE_Def,
    NULL,    //&vmmR3Switcher32BitToAMD64_Def,
    NULL,    //&vmmR3SwitcherPAETo32Bit_Def,
    NULL,    //&vmmR3SwitcherPAEToPAE_Def,
    NULL,    //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL     //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif /* RT_ARCH_AMD64 */
};
78
79
80/**
81 * VMMR3Init worker that initiates the switcher code (aka core code).
82 *
83 * This is core per VM code which might need fixups and/or for ease of use are
84 * put on linear contiguous backing.
85 *
86 * @returns VBox status code.
87 * @param pVM Pointer to the shared VM structure.
88 */
89int vmmR3SwitcherInit(PVM pVM)
90{
91 /*
92 * Calc the size.
93 */
94 unsigned cbCoreCode = 0;
95 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
96 {
97 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
98 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
99 if (pSwitcher)
100 {
101 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
102 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
103 }
104 }
105
106 /*
107 * Allocate continguous pages for switchers and deal with
108 * conflicts in the intermediate mapping of the code.
109 */
110 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
111 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
112 int rc = VERR_NO_MEMORY;
113 if (pVM->vmm.s.pvCoreCodeR3)
114 {
115 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
116 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
117 {
118 /* try more allocations - Solaris, Linux. */
119 const unsigned cTries = 8234;
120 struct VMMInitBadTry
121 {
122 RTR0PTR pvR0;
123 void *pvR3;
124 RTHCPHYS HCPhys;
125 RTUINT cb;
126 } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
127 AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
128 unsigned i = 0;
129 do
130 {
131 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
132 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
133 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
134 i++;
135 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
136 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
137 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
138 if (!pVM->vmm.s.pvCoreCodeR3)
139 break;
140 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
141 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
142 && i < cTries - 1);
143
144 /* cleanup */
145 if (VBOX_FAILURE(rc))
146 {
147 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
148 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
149 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
150 paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
151 i++;
152 LogRel(("Failed to allocated and map core code: rc=%Vrc\n", rc));
153 }
154 while (i-- > 0)
155 {
156 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
157 i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
158 SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
159 }
160 RTMemTmpFree(paBadTries);
161 }
162 }
163 if (VBOX_SUCCESS(rc))
164 {
165 /*
166 * copy the code.
167 */
168 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
169 {
170 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
171 if (pSwitcher)
172 memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
173 pSwitcher->pvCode, pSwitcher->cbCode);
174 }
175
176 /*
177 * Map the code into the GC address space.
178 */
179 RTGCPTR GCPtr;
180 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &GCPtr);
181 if (VBOX_SUCCESS(rc))
182 {
183 pVM->vmm.s.pvCoreCodeRC = GCPtr;
184 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
185 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VRv Phys=%VHp cb=%#x\n",
186 pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
187
188 /*
189 * Finally, PGM probably have selected a switcher already but we need
190 * to get the routine addresses, so we'll reselect it.
191 * This may legally fail so, we're ignoring the rc.
192 */
193 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
194 return rc;
195 }
196
197 /* shit */
198 AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
199 SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
200 }
201 else
202 VMSetError(pVM, rc, RT_SRC_POS,
203 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
204 cbCoreCode);
205
206 pVM->vmm.s.pvCoreCodeR3 = NULL;
207 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
208 pVM->vmm.s.pvCoreCodeRC = 0;
209 return rc;
210}
211
212
213/**
214 * Relocate the switchers, called by VMMR#Relocate.
215 *
216 * @param pVM Pointer to the shared VM structure.
217 * @param offDelta The relocation delta.
218 */
219void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
220{
221 /*
222 * Relocate all the switchers.
223 */
224 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
225 {
226 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
227 if (pSwitcher && pSwitcher->pfnRelocate)
228 {
229 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
230 pSwitcher->pfnRelocate(pVM,
231 pSwitcher,
232 pVM->vmm.s.pvCoreCodeR0 + off,
233 (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
234 pVM->vmm.s.pvCoreCodeRC + off,
235 pVM->vmm.s.HCPhysCoreCode + off);
236 }
237 }
238
239 /*
240 * Recalc the RC address for the current switcher.
241 */
242 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
243 RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
244 pVM->vmm.s.pfnGuestToHostRC = RCPtr + pSwitcher->offGCGuestToHost;
245 pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offGCCallTrampoline;
246 pVM->pfnVMMGCGuestToHostAsm = RCPtr + pSwitcher->offGCGuestToHostAsm;
247 pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
248 pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
249
250}
251
252
253/**
254 * Generic switcher code relocator.
255 *
256 * @param pVM The VM handle.
257 * @param pSwitcher The switcher definition.
258 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
259 * @param R0PtrCode Pointer to the core code block for the switcher, ring-0 mapping.
260 * @param GCPtrCode The guest context address corresponding to pu8Code.
261 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
262 * @param SelCS The hypervisor CS selector.
263 * @param SelDS The hypervisor DS selector.
264 * @param SelTSS The hypervisor TSS selector.
265 * @param GCPtrGDT The GC address of the hypervisor GDT.
266 * @param SelCS64 The 64-bit mode hypervisor CS selector.
267 */
268static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
269 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
270{
271 union
272 {
273 const uint8_t *pu8;
274 const uint16_t *pu16;
275 const uint32_t *pu32;
276 const uint64_t *pu64;
277 const void *pv;
278 uintptr_t u;
279 } u;
280 u.pv = pSwitcher->pvFixups;
281
282 /*
283 * Process fixups.
284 */
285 uint8_t u8;
286 while ((u8 = *u.pu8++) != FIX_THE_END)
287 {
288 /*
289 * Get the source (where to write the fixup).
290 */
291 uint32_t offSrc = *u.pu32++;
292 Assert(offSrc < pSwitcher->cbCode);
293 union
294 {
295 uint8_t *pu8;
296 uint16_t *pu16;
297 uint32_t *pu32;
298 uint64_t *pu64;
299 uintptr_t u;
300 } uSrc;
301 uSrc.pu8 = pu8CodeR3 + offSrc;
302
303 /* The fixup target and method depends on the type. */
304 switch (u8)
305 {
306 /*
307 * 32-bit relative, source in HC and target in GC.
308 */
309 case FIX_HC_2_GC_NEAR_REL:
310 {
311 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
312 uint32_t offTrg = *u.pu32++;
313 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
314 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
315 break;
316 }
317
318 /*
319 * 32-bit relative, source in HC and target in ID.
320 */
321 case FIX_HC_2_ID_NEAR_REL:
322 {
323 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
324 uint32_t offTrg = *u.pu32++;
325 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
326 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
327 break;
328 }
329
330 /*
331 * 32-bit relative, source in GC and target in HC.
332 */
333 case FIX_GC_2_HC_NEAR_REL:
334 {
335 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
336 uint32_t offTrg = *u.pu32++;
337 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
338 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
339 break;
340 }
341
342 /*
343 * 32-bit relative, source in GC and target in ID.
344 */
345 case FIX_GC_2_ID_NEAR_REL:
346 {
347 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
348 uint32_t offTrg = *u.pu32++;
349 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
350 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
351 break;
352 }
353
354 /*
355 * 32-bit relative, source in ID and target in HC.
356 */
357 case FIX_ID_2_HC_NEAR_REL:
358 {
359 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
360 uint32_t offTrg = *u.pu32++;
361 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
362 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
363 break;
364 }
365
366 /*
367 * 32-bit relative, source in ID and target in HC.
368 */
369 case FIX_ID_2_GC_NEAR_REL:
370 {
371 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
372 uint32_t offTrg = *u.pu32++;
373 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
374 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
375 break;
376 }
377
378 /*
379 * 16:32 far jump, target in GC.
380 */
381 case FIX_GC_FAR32:
382 {
383 uint32_t offTrg = *u.pu32++;
384 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
385 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
386 *uSrc.pu16++ = SelCS;
387 break;
388 }
389
390 /*
391 * Make 32-bit GC pointer given CPUM offset.
392 */
393 case FIX_GC_CPUM_OFF:
394 {
395 uint32_t offCPUM = *u.pu32++;
396 Assert(offCPUM < sizeof(pVM->cpum));
397 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
398 break;
399 }
400
401 /*
402 * Make 32-bit GC pointer given VM offset.
403 */
404 case FIX_GC_VM_OFF:
405 {
406 uint32_t offVM = *u.pu32++;
407 Assert(offVM < sizeof(VM));
408 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
409 break;
410 }
411
412 /*
413 * Make 32-bit HC pointer given CPUM offset.
414 */
415 case FIX_HC_CPUM_OFF:
416 {
417 uint32_t offCPUM = *u.pu32++;
418 Assert(offCPUM < sizeof(pVM->cpum));
419 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
420 break;
421 }
422
423 /*
424 * Make 32-bit R0 pointer given VM offset.
425 */
426 case FIX_HC_VM_OFF:
427 {
428 uint32_t offVM = *u.pu32++;
429 Assert(offVM < sizeof(VM));
430 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
431 break;
432 }
433
434 /*
435 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
436 */
437 case FIX_INTER_32BIT_CR3:
438 {
439
440 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
441 break;
442 }
443
444 /*
445 * Store the PAE CR3 (32-bit) for the intermediate memory context.
446 */
447 case FIX_INTER_PAE_CR3:
448 {
449
450 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
451 break;
452 }
453
454 /*
455 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
456 */
457 case FIX_INTER_AMD64_CR3:
458 {
459
460 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
461 break;
462 }
463
464 /*
465 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
466 */
467 case FIX_HYPER_32BIT_CR3:
468 {
469
470 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
471 break;
472 }
473
474 /*
475 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
476 */
477 case FIX_HYPER_PAE_CR3:
478 {
479
480 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
481 break;
482 }
483
484 /*
485 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
486 */
487 case FIX_HYPER_AMD64_CR3:
488 {
489
490 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
491 break;
492 }
493
494 /*
495 * Store Hypervisor CS (16-bit).
496 */
497 case FIX_HYPER_CS:
498 {
499 *uSrc.pu16 = SelCS;
500 break;
501 }
502
503 /*
504 * Store Hypervisor DS (16-bit).
505 */
506 case FIX_HYPER_DS:
507 {
508 *uSrc.pu16 = SelDS;
509 break;
510 }
511
512 /*
513 * Store Hypervisor TSS (16-bit).
514 */
515 case FIX_HYPER_TSS:
516 {
517 *uSrc.pu16 = SelTSS;
518 break;
519 }
520
521 /*
522 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
523 */
524 case FIX_GC_TSS_GDTE_DW2:
525 {
526 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
527 *uSrc.pu32 = (uint32_t)GCPtr;
528 break;
529 }
530
531
532 ///@todo case FIX_CR4_MASK:
533 ///@todo case FIX_CR4_OSFSXR:
534
535 /*
536 * Insert relative jump to specified target it FXSAVE/FXRSTOR isn't supported by the cpu.
537 */
538 case FIX_NO_FXSAVE_JMP:
539 {
540 uint32_t offTrg = *u.pu32++;
541 Assert(offTrg < pSwitcher->cbCode);
542 if (!CPUMSupportsFXSR(pVM))
543 {
544 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
545 *uSrc.pu32++ = offTrg - (offSrc + 5);
546 }
547 else
548 {
549 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
550 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
551 }
552 break;
553 }
554
555 /*
556 * Insert relative jump to specified target it SYSENTER isn't used by the host.
557 */
558 case FIX_NO_SYSENTER_JMP:
559 {
560 uint32_t offTrg = *u.pu32++;
561 Assert(offTrg < pSwitcher->cbCode);
562 if (!CPUMIsHostUsingSysEnter(pVM))
563 {
564 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
565 *uSrc.pu32++ = offTrg - (offSrc + 5);
566 }
567 else
568 {
569 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
570 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
571 }
572 break;
573 }
574
575 /*
576 * Insert relative jump to specified target it SYSENTER isn't used by the host.
577 */
578 case FIX_NO_SYSCALL_JMP:
579 {
580 uint32_t offTrg = *u.pu32++;
581 Assert(offTrg < pSwitcher->cbCode);
582 if (!CPUMIsHostUsingSysEnter(pVM))
583 {
584 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
585 *uSrc.pu32++ = offTrg - (offSrc + 5);
586 }
587 else
588 {
589 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
590 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
591 }
592 break;
593 }
594
595 /*
596 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
597 */
598 case FIX_HC_32BIT:
599 {
600 uint32_t offTrg = *u.pu32++;
601 Assert(offSrc < pSwitcher->cbCode);
602 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
603 *uSrc.pu32 = R0PtrCode + offTrg;
604 break;
605 }
606
607#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
608 /*
609 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
610 */
611 case FIX_HC_64BIT:
612 {
613 uint32_t offTrg = *u.pu32++;
614 Assert(offSrc < pSwitcher->cbCode);
615 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
616 *uSrc.pu64 = R0PtrCode + offTrg;
617 break;
618 }
619
620 /*
621 * 64-bit HC Code Selector (no argument).
622 */
623 case FIX_HC_64BIT_CS:
624 {
625 Assert(offSrc < pSwitcher->cbCode);
626#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
627 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
628#else
629 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
630#endif
631 break;
632 }
633
634 /*
635 * 64-bit HC pointer to the CPUM instance data (no argument).
636 */
637 case FIX_HC_64BIT_CPUM:
638 {
639 Assert(offSrc < pSwitcher->cbCode);
640 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
641 break;
642 }
643#endif
644
645 /*
646 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
647 */
648 case FIX_ID_32BIT:
649 {
650 uint32_t offTrg = *u.pu32++;
651 Assert(offSrc < pSwitcher->cbCode);
652 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
653 *uSrc.pu32 = u32IDCode + offTrg;
654 break;
655 }
656
657 /*
658 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
659 */
660 case FIX_ID_64BIT:
661 {
662 uint32_t offTrg = *u.pu32++;
663 Assert(offSrc < pSwitcher->cbCode);
664 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
665 *uSrc.pu64 = u32IDCode + offTrg;
666 break;
667 }
668
669 /*
670 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
671 */
672 case FIX_ID_FAR32_TO_64BIT_MODE:
673 {
674 uint32_t offTrg = *u.pu32++;
675 Assert(offSrc < pSwitcher->cbCode);
676 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
677 *uSrc.pu32++ = u32IDCode + offTrg;
678 *uSrc.pu16 = SelCS64;
679 AssertRelease(SelCS64);
680 break;
681 }
682
683#ifdef VBOX_WITH_NMI
684 /*
685 * 32-bit address to the APIC base.
686 */
687 case FIX_GC_APIC_BASE_32BIT:
688 {
689 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
690 break;
691 }
692#endif
693
694 default:
695 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
696 break;
697 }
698 }
699
700#ifdef LOG_ENABLED
701 /*
702 * If Log2 is enabled disassemble the switcher code.
703 *
704 * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts.
705 */
706 if (LogIs2Enabled())
707 {
708 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
709 " R0PtrCode = %p\n"
710 " pu8CodeR3 = %p\n"
711 " GCPtrCode = %VGv\n"
712 " u32IDCode = %08x\n"
713 " pVMGC = %VGv\n"
714 " pCPUMGC = %VGv\n"
715 " pVMHC = %p\n"
716 " pCPUMHC = %p\n"
717 " GCPtrGDT = %VGv\n"
718 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
719 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
720 " SelCS = %04x\n"
721 " SelDS = %04x\n"
722 " SelCS64 = %04x\n"
723 " SelTSS = %04x\n",
724 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
725 R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
726 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
727 GCPtrGDT,
728 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
729 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
730 SelCS, SelDS, SelCS64, SelTSS);
731
732 uint32_t offCode = 0;
733 while (offCode < pSwitcher->cbCode)
734 {
735 /*
736 * Figure out where this is.
737 */
738 const char *pszDesc = NULL;
739 RTUINTPTR uBase;
740 uint32_t cbCode;
741 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
742 {
743 pszDesc = "HCCode0";
744 uBase = R0PtrCode;
745 offCode = pSwitcher->offHCCode0;
746 cbCode = pSwitcher->cbHCCode0;
747 }
748 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
749 {
750 pszDesc = "HCCode1";
751 uBase = R0PtrCode;
752 offCode = pSwitcher->offHCCode1;
753 cbCode = pSwitcher->cbHCCode1;
754 }
755 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
756 {
757 pszDesc = "GCCode";
758 uBase = GCPtrCode;
759 offCode = pSwitcher->offGCCode;
760 cbCode = pSwitcher->cbGCCode;
761 }
762 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
763 {
764 pszDesc = "IDCode0";
765 uBase = u32IDCode;
766 offCode = pSwitcher->offIDCode0;
767 cbCode = pSwitcher->cbIDCode0;
768 }
769 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
770 {
771 pszDesc = "IDCode1";
772 uBase = u32IDCode;
773 offCode = pSwitcher->offIDCode1;
774 cbCode = pSwitcher->cbIDCode1;
775 }
776 else
777 {
778 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
779 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
780 offCode++;
781 continue;
782 }
783
784 /*
785 * Disassemble it.
786 */
787 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
788 DISCPUSTATE Cpu;
789
790 memset(&Cpu, 0, sizeof(Cpu));
791 Cpu.mode = CPUMODE_32BIT;
792 while (cbCode > 0)
793 {
794 /* try label it */
795 if (pSwitcher->offR0HostToGuest == offCode)
796 RTLogPrintf(" *R0HostToGuest:\n");
797 if (pSwitcher->offGCGuestToHost == offCode)
798 RTLogPrintf(" *GCGuestToHost:\n");
799 if (pSwitcher->offGCCallTrampoline == offCode)
800 RTLogPrintf(" *GCCallTrampoline:\n");
801 if (pSwitcher->offGCGuestToHostAsm == offCode)
802 RTLogPrintf(" *GCGuestToHostAsm:\n");
803 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
804 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
805 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
806 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
807
808 /* disas */
809 uint32_t cbInstr = 0;
810 char szDisas[256];
811 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
812 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
813 else
814 {
815 RTLogPrintf(" %04x: %02x '%c'\n",
816 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
817 cbInstr = 1;
818 }
819 offCode += cbInstr;
820 cbCode -= RT_MIN(cbInstr, cbCode);
821 }
822 }
823 }
824#endif
825}
826
827
828/**
829 * Relocator for the 32-Bit to 32-Bit world switcher.
830 */
831DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
832{
833 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
834 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
835}
836
837
838/**
839 * Relocator for the 32-Bit to PAE world switcher.
840 */
841DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
842{
843 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
844 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
845}
846
847
848/**
849 * Relocator for the PAE to 32-Bit world switcher.
850 */
851DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
852{
853 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
854 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
855}
856
857
858/**
859 * Relocator for the PAE to PAE world switcher.
860 */
861DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
862{
863 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
864 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
865}
866
867
868/**
869 * Relocator for the AMD64 to PAE world switcher.
870 */
871DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
872{
873 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
874 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
875}
876
877
878/**
879 * Selects the switcher to be used for switching to GC.
880 *
881 * @returns VBox status code.
882 * @param pVM VM handle.
883 * @param enmSwitcher The new switcher.
884 * @remark This function may be called before the VMM is initialized.
885 */
886VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
887{
888 /*
889 * Validate input.
890 */
891 if ( enmSwitcher < VMMSWITCHER_INVALID
892 || enmSwitcher >= VMMSWITCHER_MAX)
893 {
894 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
895 return VERR_INVALID_PARAMETER;
896 }
897
898 /* Do nothing if the switcher is disabled. */
899 if (pVM->vmm.s.fSwitcherDisabled)
900 return VINF_SUCCESS;
901
902 /*
903 * Select the new switcher.
904 */
905 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
906 if (pSwitcher)
907 {
908 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
909 pVM->vmm.s.enmSwitcher = enmSwitcher;
910
911 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
912 pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
913
914 RTGCPTR GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
915 pVM->vmm.s.pfnGuestToHostRC = GCPtr + pSwitcher->offGCGuestToHost;
916 pVM->vmm.s.pfnCallTrampolineRC = GCPtr + pSwitcher->offGCCallTrampoline;
917 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
918 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
919 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
920 return VINF_SUCCESS;
921 }
922
923 return VERR_NOT_IMPLEMENTED;
924}
925
926
/**
 * Disable the switcher logic permanently.
 *
 * Sets the fSwitcherDisabled flag; once set, VMMR3SelectSwitcher becomes a
 * no-op returning VINF_SUCCESS.  There is no way to re-enable the switcher
 * afterwards.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
{
/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
 * @code
 *       mov eax, VERR_INTERNAL_ERROR
 *       ret
 * @endcode
 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
 */
    pVM->vmm.s.fSwitcherDisabled = true;
    return VINF_SUCCESS;
}
945
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette