VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@11280

Last change on this file since 11280 was 11141, checked in by vboxsync, 16 years ago

VMM: Try harder allocating the core code page(s). Solaris frequently throws up series of bad pages here, very little we can do about it when there is lots of memory on the system (without having to relocate during init).

1/* $Id: VMM.cpp 11141 2008-08-05 17:16:08Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22//#define NO_SUPCALLR0VMM
23
24/** @page pg_vmm VMM - The Virtual Machine Monitor
25 *
26 * !Revise this! It's already incorrect!
27 *
28 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
29 * manages the alternate reality: controlling the virtualization, managing
30 * resources, tracking CPU state, its resources and so on...
31 *
32 * We will split the VMM into smaller entities:
33 *
34 * - Virtual Machine Core Monitor (VMCM), whose purpose it is to
35 * provide ring and world switching, including routing
36 * interrupts to the host OS and traps to the appropriate trap
37 * handlers. It will implement an external interface for
38 * managing trap handlers.
39 *
40 * - CPU Monitor (CM), tracking the state of the CPU (in the alternate
41 * reality) and implementing external interfaces to read and change
42 * the state.
43 *
44 * - Memory Monitor (MM), whose purpose it is to virtualize physical
45 * pages, segment descriptor tables, interrupt descriptor tables, task
46 * segments, and keep track of all memory, providing external interfaces
47 * to access content and map pages. (Internally split into smaller entities!)
48 *
49 * - IO Monitor (IOM), which virtualizes in and out I/O operations. It
50 * interacts with the MM to implement memory mapped I/O. External
51 * interfaces for adding and removing I/O ranges are implemented.
52 *
53 * - External Interrupt Monitor (EIM), whose purpose it is to manage
54 * interrupts generated by virtual devices. This monitor provides
55 * an interface for raising interrupts which is accessible at any
56 * time and from all threads.
57 * <p>
58 * A subentity of the EIM is the virtual Programmable Interrupt
59 * Controller Device (VPICD), and perhaps a virtual I/O Advanced
60 * Programmable Interrupt Controller Device (VAPICD).
61 *
62 * - Direct Memory Access Monitor (DMAM), whose purpose it is to support
63 * virtual devices using the DMA controller. Its interfaces must, like
64 * the EIM interfaces, be independent and threadable.
65 * <p>
66 * A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
67 *
68 *
69 * Entities working on a higher level:
70 *
71 * - Device Manager (DM), which is a support facility for virtualized
72 * hardware. This provides generic facilities for efficient device
73 * virtualization. It will manage device attaching and detaching,
74 * conversing with EIM and IOM.
75 *
76 * - Debugger Facility (DBGF) provides the basic features for
77 * debugging the alternate reality execution.
78 *
79 *
80 *
81 * @section pg_vmm_s_use_cases Use Cases
82 *
83 * @subsection pg_vmm_s_use_case_boot Bootstrap
84 *
85 * - Basic Init:
86 * - Init SUPDRV.
87 *
88 * - Init Virtual Machine Instance:
89 * - Load settings.
90 * - Check resource requirements (memory, com, stuff).
91 *
92 * - Init Host Ring 3 part:
93 * - Init Core code.
94 * - Load Pluggable Components.
95 * - Init Pluggable Components.
96 *
97 * - Init Host Ring 0 part:
98 * - Load Core (core = core components like VMM, RMI, CA, and so on) code.
99 * - Init Core code.
100 * - Load Pluggable Component code.
101 * - Init Pluggable Component code.
102 *
103 * - Allocate first chunk of memory and pin it down. This block of memory
104 * will fit the following pieces:
105 * - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
106 * (This is available from everywhere (at different addresses though)).
107 * - VMM Guest Context code.
108 * - Pluggable devices Guest Context code.
109 * - Page tables (directory and everything) for the VMM Guest
110 *
111 * - Setup Guest (Ring 0) part:
112 * - Setup initial page tables (i.e. directory all the stuff).
113 * - Load Core Guest Context code.
114 * - Load Pluggable Devices Guest Context code.
115 *
116 *
117 */
118
119
120/*******************************************************************************
121* Header Files *
122*******************************************************************************/
123#define LOG_GROUP LOG_GROUP_VMM
124#include <VBox/vmm.h>
125#include <VBox/vmapi.h>
126#include <VBox/pgm.h>
127#include <VBox/cfgm.h>
128#include <VBox/pdmqueue.h>
129#include <VBox/pdmapi.h>
130#include <VBox/cpum.h>
131#include <VBox/mm.h>
132#include <VBox/iom.h>
133#include <VBox/trpm.h>
134#include <VBox/selm.h>
135#include <VBox/em.h>
136#include <VBox/sup.h>
137#include <VBox/dbgf.h>
138#include <VBox/csam.h>
139#include <VBox/patm.h>
140#include <VBox/rem.h>
141#include <VBox/ssm.h>
142#include <VBox/tm.h>
143#include "VMMInternal.h"
144#include "VMMSwitcher/VMMSwitcher.h"
145#include <VBox/vm.h>
146#include <VBox/err.h>
147#include <VBox/param.h>
148#include <VBox/version.h>
149#include <VBox/x86.h>
150#include <VBox/hwaccm.h>
151#include <iprt/assert.h>
152#include <iprt/alloc.h>
153#include <iprt/asm.h>
154#include <iprt/time.h>
155#include <iprt/stream.h>
156#include <iprt/string.h>
157#include <iprt/stdarg.h>
158#include <iprt/ctype.h>
159
160
161
162/** The saved state version. */
163#define VMM_SAVED_STATE_VERSION 3
164
165
166/*******************************************************************************
167* Internal Functions *
168*******************************************************************************/
169static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
170static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
171static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
172static int vmmR3ServiceCallHostRequest(PVM pVM);
173static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
174
175
176/*******************************************************************************
177* Global Variables *
178*******************************************************************************/
179/** Array of switcher definitions.
180 * The type and index shall match!
181 */
182static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
183{
184 NULL, /* invalid entry */
185#ifndef RT_ARCH_AMD64
186 &vmmR3Switcher32BitTo32Bit_Def,
187 &vmmR3Switcher32BitToPAE_Def,
188 NULL, //&vmmR3Switcher32BitToAMD64_Def,
189 &vmmR3SwitcherPAETo32Bit_Def,
190 &vmmR3SwitcherPAEToPAE_Def,
191 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
192# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
193 &vmmR3SwitcherAMD64ToPAE_Def,
194# else
195 NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
196# endif
197 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
198#else
199 NULL, //&vmmR3Switcher32BitTo32Bit_Def,
200 NULL, //&vmmR3Switcher32BitToPAE_Def,
201 NULL, //&vmmR3Switcher32BitToAMD64_Def,
202 NULL, //&vmmR3SwitcherPAETo32Bit_Def,
203 NULL, //&vmmR3SwitcherPAEToPAE_Def,
204 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
205 &vmmR3SwitcherAMD64ToPAE_Def,
206 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
207#endif
208};
209
210
211
212/**
213 * Initializes the core code.
214 *
215 * This is per-VM core code which might need fixups and/or which, for ease
216 * of use, is put on linearly contiguous backing.
217 *
218 * @returns VBox status code.
219 * @param pVM Pointer to VM structure.
220 */
221static int vmmR3InitCoreCode(PVM pVM)
222{
223 /*
224 * Calc the size.
225 */
226 unsigned cbCoreCode = 0;
227 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
228 {
229 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
230 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
231 if (pSwitcher)
232 {
233 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
234 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
235 }
236 }
237
238 /*
239 * Allocate contiguous pages for the switchers and deal with
240 * conflicts in the intermediate mapping of the code.
241 */
242 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
243 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
244 int rc = VERR_NO_MEMORY;
245 if (pVM->vmm.s.pvHCCoreCodeR3)
246 {
247 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
248 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
249 {
250 /* try more allocations - Solaris */
251 const unsigned cTries = 4112;
252 struct VMMInitBadTry
253 {
254 RTR0PTR pvR0;
255 void *pvR3;
256 RTHCPHYS HCPhys;
257 RTUINT cb;
258 } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
259 AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
260 unsigned i = 0;
261 do
262 {
263 paBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
264 paBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
265 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
266 i++;
267 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
268 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
269 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
270 if (!pVM->vmm.s.pvHCCoreCodeR3)
271 break;
272 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
273 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
274 && i < cTries - 1);
275
276 /* cleanup */
277 if (VBOX_FAILURE(rc))
278 {
279 paBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
280 paBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
281 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
282 paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
283 i++;
284 LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
285 }
286 while (i-- > 0)
287 {
288 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
289 i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
290 SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
291 }
292 RTMemTmpFree(paBadTries);
293 }
294 }
295 if (VBOX_SUCCESS(rc))
296 {
297 /*
298 * Copy the code.
299 */
300 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
301 {
302 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
303 if (pSwitcher)
304 memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
305 pSwitcher->pvCode, pSwitcher->cbCode);
306 }
307
308 /*
309 * Map the code into the GC address space.
310 */
311 RTGCPTR GCPtr;
312 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &GCPtr);
313 if (VBOX_SUCCESS(rc))
314 {
315 pVM->vmm.s.pvGCCoreCode = GCPtr;
316 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
317 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VRv Phys=%VHp cb=%#x\n",
318 pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
319
320 /*
321 * Finally, PGM has probably selected a switcher already, but we need
322 * to get the addresses, so we'll reselect it.
323 * This may legally fail, so we're ignoring the rc.
324 */
325 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
326 return rc;
327 }
328
329 /* shit */
330 AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
331 SUPContFree(pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
332 }
333 else
334 VMSetError(pVM, rc, RT_SRC_POS,
335 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
336 cbCoreCode);
337
338 pVM->vmm.s.pvHCCoreCodeR3 = NULL;
339 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
340 pVM->vmm.s.pvGCCoreCode = 0;
341 return rc;
342}
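
/*
 * A minimal sketch of the retry strategy above, assuming the allocator may
 * hand the same bad physical pages straight back if they are freed too early
 * (the Solaris behaviour noted in the change log): every conflicting block is
 * kept allocated until a usable one is found, and only then are they all
 * released. The helper name and table size are hypothetical; SUPContAlloc2,
 * PGMR3MapIntermediate and SUPContFree are the APIs used above.
 */
#if 0 /* illustrative sketch, not part of the build */
static int vmmR3SketchAllocNonConflicting(PVM pVM, uint32_t cbCode)
{
    struct { void *pvR3; } aHeld[16]; /* conflicting blocks kept alive */
    unsigned cHeld = 0;
    int      rc    = VERR_NO_MEMORY;
    while (cHeld < ELEMENTS(aHeld))
    {
        RTR0PTR  pvR0;
        RTHCPHYS HCPhys;
        void    *pvR3 = SUPContAlloc2(cbCode >> PAGE_SHIFT, &pvR0, &HCPhys);
        if (!pvR3)
            break;                              /* out of memory */
        rc = PGMR3MapIntermediate(pVM, pvR0, HCPhys, cbCode);
        if (rc != VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
            break;      /* success (caller records pvR3/pvR0/HCPhys) or hard error */
        aHeld[cHeld++].pvR3 = pvR3;             /* hold the bad block */
    }
    while (cHeld-- > 0)                         /* free all the bad blocks */
        SUPContFree(aHeld[cHeld].pvR3, cbCode >> PAGE_SHIFT);
    return rc;
}
#endif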
343
344
345/**
346 * Initializes the VMM.
347 *
348 * @returns VBox status code.
349 * @param pVM The VM to operate on.
350 */
351VMMR3DECL(int) VMMR3Init(PVM pVM)
352{
353 LogFlow(("VMMR3Init\n"));
354
355 /*
356 * Assert alignment, sizes and order.
357 */
358 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
359 AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
360 ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
361 sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
362
363 /*
364 * Init basic VM VMM members.
365 */
366 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
367 int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
368 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
369 pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
370 //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
371 else
372 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
373
374 /* GC switchers are enabled by default. Turned off by HWACCM. */
375 pVM->vmm.s.fSwitcherDisabled = false;
376
377 /*
378 * Register the saved state data unit.
379 */
380 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
381 NULL, vmmR3Save, NULL,
382 NULL, vmmR3Load, NULL);
383 if (VBOX_FAILURE(rc))
384 return rc;
385
386 /*
387 * Register the Ring-0 VM handle with the session for fast ioctl calls.
388 */
389 rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
390 if (VBOX_FAILURE(rc))
391 return rc;
392
393 /*
394 * Init core code.
395 */
396 rc = vmmR3InitCoreCode(pVM);
397 if (VBOX_SUCCESS(rc))
398 {
399 /*
400 * Allocate & init VMM GC stack.
401 * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
402 * (The page protection is modified during R3 init completion.)
403 */
404#ifdef VBOX_STRICT_VMM_STACK
405 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
406#else
407 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
408#endif
409 if (VBOX_SUCCESS(rc))
410 {
411 /* Set HC and GC stack pointers to top of stack. */
412 pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
413 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
414 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
415 AssertRelease(pVM->vmm.s.pbGCStack);
416
417 /* Set hypervisor ESP. */
418 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);
419
420 /*
421 * Allocate GC & R0 Logger instances (they are finalized in the relocator).
422 */
423#ifdef LOG_ENABLED
424 PRTLOGGER pLogger = RTLogDefaultInstance();
425 if (pLogger)
426 {
427 pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
428 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
429 if (VBOX_SUCCESS(rc))
430 {
431 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
432
433/*
434 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
435 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
436 *
437 * If you want to log in non-debug modes, you'll have to remember to change SUPDRVShared.c
438 * to not stub all the log functions.
439 *
440 * You might also wish to enable the AssertMsg1/2 overrides in VMMR0.cpp when enabling this.
441 */
442# if defined(DEBUG_sandervl) || defined(DEBUG_frank)
443 rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
444 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
445 if (VBOX_SUCCESS(rc))
446 {
447 pVM->vmm.s.pR0Logger->pVM = pVM->pVMR0;
448 //pVM->vmm.s.pR0Logger->fCreated = false;
449 pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
450 }
451# endif
452 }
453 }
454#endif /* LOG_ENABLED */
455
456#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
457 /*
458 * Allocate GC Release Logger instances (finalized in the relocator).
459 */
460 if (VBOX_SUCCESS(rc))
461 {
462 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
463 if (pRelLogger)
464 {
465 pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
466 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
467 if (VBOX_SUCCESS(rc))
468 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
469 }
470 }
471#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
472
473#ifdef VBOX_WITH_NMI
474 /*
475 * Allocate mapping for the host APIC.
476 */
477 if (VBOX_SUCCESS(rc))
478 {
479 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
480 AssertRC(rc);
481 }
482#endif
483 if (VBOX_SUCCESS(rc))
484 {
485 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
486 if (VBOX_SUCCESS(rc))
487 {
488 /*
489 * Debug info.
490 */
491 DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current Forced actions Flags.", vmmR3InfoFF);
492
493 /*
494 * Statistics.
495 */
496 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
497 STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
498 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
499 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
500 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
508 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
509 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
510 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
511 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
512 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
513 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
514 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
515 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
516 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
517 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
518 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
519 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
520 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
521 STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
522 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
523 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
524 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
525 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
526 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
527 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
528 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
529 STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
530 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
531 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
532 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
533 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
534 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
535 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
536 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
537 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
538 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
539 STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
540 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
541 STAM_REG(pVM, &pVM->vmm.s.StatGCRetHyperAssertion, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/HyperAssert", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
542 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
543 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
544 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
545 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
546
547 return VINF_SUCCESS;
548 }
549 AssertRC(rc);
550 }
551 }
552 /** @todo: Need failure cleanup. */
553
554 //more todo in here?
555 //if (VBOX_SUCCESS(rc))
556 //{
557 //}
558 //int rc2 = vmmR3TermCoreCode(pVM);
559 //AssertRC(rc2));
560 }
561
562 return rc;
563}
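
/*
 * A minimal sketch of the CFGM query-with-default idiom used by VMMR3Init
 * above: an optional key falls back to a built-in default when absent, while
 * any other failure is treated as a fatal configuration error. The key name
 * and default mirror the code above; everything else is illustrative.
 */
#if 0 /* illustrative sketch, not part of the build */
uint32_t cYieldEveryMillies;
int rcCfg = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &cYieldEveryMillies);
if (rcCfg == VERR_CFGM_VALUE_NOT_FOUND)
    cYieldEveryMillies = 23; /* key absent: use the default */
else
    AssertMsgRCReturn(rcCfg, ("Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rcCfg), rcCfg);
#endif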
564
565
566/**
567 * Ring-3 init finalizing.
568 *
569 * @returns VBox status code.
570 * @param pVM The VM handle.
571 */
572VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
573{
574#ifdef VBOX_STRICT_VMM_STACK
575 /*
576 * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
577 */
578 memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
579 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
580 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
581
582 memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
583 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
584 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
585#endif
586
587 /*
588 * Set page attributes to r/w for stack pages.
589 */
590 int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
591 AssertRC(rc);
592 if (VBOX_SUCCESS(rc))
593 {
594 /*
595 * Create the EMT yield timer.
596 */
597 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
598 if (VBOX_SUCCESS(rc))
599 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
600 }
601#ifdef VBOX_WITH_NMI
602 /*
603 * Map the host APIC into GC - this may be host OS specific!
604 */
605 if (VBOX_SUCCESS(rc))
606 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
607 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
608#endif
609 return rc;
610}
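
/*
 * A compact restatement of the guard-page trick above, assuming a stack
 * buffer allocated with one spare page on each side (as VMMR3Init does under
 * VBOX_STRICT_VMM_STACK): fill the spare pages with a recognizable 0xcc
 * pattern, then revoke all access so an overflow or underflow faults at the
 * offending instruction instead of silently corrupting its neighbours.
 */
#if 0 /* illustrative sketch, not part of the build */
uint8_t *pbStack = pVM->vmm.s.pbHCStack;           /* VMM_STACK_SIZE bytes */
memset(pbStack - PAGE_SIZE, 0xcc, PAGE_SIZE);      /* low guard page */
RTMemProtect(pbStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
memset(pbStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE); /* high guard page */
RTMemProtect(pbStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
#endif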
611
612
613/**
614 * Initializes the R0 VMM.
615 *
616 * @returns VBox status code.
617 * @param pVM The VM to operate on.
618 */
619VMMR3DECL(int) VMMR3InitR0(PVM pVM)
620{
621 int rc;
622
623 /*
624 * Initialize the ring-0 logger if we haven't done so yet.
625 */
626 if ( pVM->vmm.s.pR0Logger
627 && !pVM->vmm.s.pR0Logger->fCreated)
628 {
629 rc = VMMR3UpdateLoggers(pVM);
630 if (VBOX_FAILURE(rc))
631 return rc;
632 }
633
634 /*
635 * Call Ring-0 entry with init code.
636 */
637 for (;;)
638 {
639#ifdef NO_SUPCALLR0VMM
640 //rc = VERR_GENERAL_FAILURE;
641 rc = VINF_SUCCESS;
642#else
643 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
644#endif
645 if ( pVM->vmm.s.pR0Logger
646 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
647 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
648 if (rc != VINF_VMM_CALL_HOST)
649 break;
650 rc = vmmR3ServiceCallHostRequest(pVM);
651 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
652 break;
653 /* Resume R0 */
654 }
655
656 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
657 {
658 LogRel(("R0 init failed, rc=%Vra\n", rc));
659 if (VBOX_SUCCESS(rc))
660 rc = VERR_INTERNAL_ERROR;
661 }
662 return rc;
663}
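
/*
 * The ring-0 call-and-service loop shared by VMMR3InitR0, VMMR3InitGC and
 * VMMR3Term above, reduced to its skeleton. Ring-0 (or GC) returns
 * VINF_VMM_CALL_HOST whenever it needs ring-3 assistance; ring-3 services
 * the request and resumes until a final status arrives.
 */
#if 0 /* illustrative sketch, not part of the build */
int rc;
for (;;)
{
    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
    if (rc != VINF_VMM_CALL_HOST)
        break; /* final status - leave the loop */
    rc = vmmR3ServiceCallHostRequest(pVM); /* do the ring-3 work */
    if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
        break; /* servicing failed or wants the EM loop */
    /* otherwise resume ring-0 */
}
#endif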
664
665
666/**
667 * Initializes the GC VMM.
668 *
669 * @returns VBox status code.
670 * @param pVM The VM to operate on.
671 */
672VMMR3DECL(int) VMMR3InitGC(PVM pVM)
673{
674 /* If the switcher is disabled (hardware-assisted mode), there's no need to init GC. */
675 if (pVM->vmm.s.fSwitcherDisabled)
676 return VINF_SUCCESS;
677
678 /*
679 * Call VMMGCInit():
680 * -# resolve the address.
681 * -# setup stackframe and EIP to use the trampoline.
682 * -# do a generic hypervisor call.
683 */
684 RTGCPTR32 GCPtrEP;
685 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
686 if (VBOX_SUCCESS(rc))
687 {
688 CPUMHyperSetCtxCore(pVM, NULL);
689 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
690 uint64_t u64TS = RTTimeProgramStartNanoTS();
691#if GC_ARCH_BITS == 32
692 CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
693 CPUMPushHyper(pVM, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
694#else /* 64-bit GC */
695 CPUMPushHyper(pVM, u64TS); /* Param 3: The program startup TS. */
696#endif
697 CPUMPushHyper(pVM, VMMGetSvnRev()); /* Param 2: Version argument. */
698 CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
699 CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */
700 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */
701 CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */
702 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
703
704 for (;;)
705 {
706#ifdef NO_SUPCALLR0VMM
707 //rc = VERR_GENERAL_FAILURE;
708 rc = VINF_SUCCESS;
709#else
710 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
711#endif
712#ifdef LOG_ENABLED
713 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
714 if ( pLogger
715 && pLogger->offScratch > 0)
716 RTLogFlushGC(NULL, pLogger);
717#endif
718#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
719 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
720 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
721 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
722#endif
723 if (rc != VINF_VMM_CALL_HOST)
724 break;
725 rc = vmmR3ServiceCallHostRequest(pVM);
726 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
727 break;
728 }
729
730 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
731 {
732 VMMR3FatalDump(pVM, rc);
733 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
734 rc = VERR_INTERNAL_ERROR;
735 }
736 AssertRC(rc);
737 }
738 return rc;
739}
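
/*
 * How VMMR3InitGC builds a guest-context call frame by hand, assuming three
 * parameter slots: arguments are pushed right to left (cdecl style), followed
 * by the stack size the trampoline must clean up and the entry point it must
 * call, and finally EIP is pointed at the call trampoline. uArg1, uArg2 and
 * GCPtrEP stand in for the real values resolved above.
 */
#if 0 /* illustrative sketch, not part of the build */
CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* start from an empty stack */
CPUMPushHyper(pVM, uArg2);                        /* Param 2: last argument */
CPUMPushHyper(pVM, uArg1);                        /* Param 1 */
CPUMPushHyper(pVM, pVM->pVMGC);                   /* Param 0: pVM */
CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR));          /* trampoline param: stack size */
CPUMPushHyper(pVM, GCPtrEP);                      /* entry point to call */
CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
#endif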
740
741
742/**
743 * Terminate the VMM bits.
744 *
745 * @returns VINF_SUCCESS.
746 * @param pVM The VM handle.
747 */
748VMMR3DECL(int) VMMR3Term(PVM pVM)
749{
750 /*
751 * Call Ring-0 entry with termination code.
752 */
753 int rc;
754 for (;;)
755 {
756#ifdef NO_SUPCALLR0VMM
757 //rc = VERR_GENERAL_FAILURE;
758 rc = VINF_SUCCESS;
759#else
760 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_TERM, 0, NULL);
761#endif
762 if ( pVM->vmm.s.pR0Logger
763 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
764 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
765 if (rc != VINF_VMM_CALL_HOST)
766 break;
767 rc = vmmR3ServiceCallHostRequest(pVM);
768 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
769 break;
770 /* Resume R0 */
771 }
772 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
773 {
774 LogRel(("VMMR3Term: R0 term failed, rc=%Vra. (warning)\n", rc));
775 if (VBOX_SUCCESS(rc))
776 rc = VERR_INTERNAL_ERROR;
777 }
778
779#ifdef VBOX_STRICT_VMM_STACK
780 /*
781 * Make the two stack guard pages present again.
782 */
783 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
784 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
785#endif
786 return rc;
787}
788
789
790/**
791 * Applies relocations to data and code managed by this
792 * component. This function will be called at init and
793 * whenever the VMM needs to relocate itself inside the GC.
794 *
795 * The VMM will need to apply relocations to the core code.
796 *
797 * @param pVM The VM handle.
798 * @param offDelta The relocation delta.
799 */
800VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
801{
802 LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
803
804 /*
805 * Recalc the GC address.
806 */
807 pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);
808
809 /*
810 * The stack.
811 */
812 CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
813 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
814 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
815
816 /*
817 * All the switchers.
818 */
819 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
820 {
821 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
822 if (pSwitcher && pSwitcher->pfnRelocate)
823 {
824 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
825 pSwitcher->pfnRelocate(pVM,
826 pSwitcher,
827 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
828 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
829 pVM->vmm.s.pvGCCoreCode + off,
830 pVM->vmm.s.HCPhysCoreCode + off);
831 }
832 }
833
834 /*
835 * Recalc the GC address for the current switcher.
836 */
837 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
838 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
839 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
840 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
841 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
842 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
843 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
844
845 /*
846 * Get other GC entry points.
847 */
848 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
849 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
850
851 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
852 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
853
854 /*
855 * Update the logger.
856 */
857 VMMR3UpdateLoggers(pVM);
858}
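
/*
 * The relocation rule VMMR3Relocate applies throughout: a guest-context
 * pointer is preferably recomputed from its (unmoved) host mapping via
 * MMHyperHC2GC(), or, when only the old GC value is at hand, rebased by
 * adding offDelta. GCPtrOld is a hypothetical stand-in.
 */
#if 0 /* illustrative sketch, not part of the build */
/* Preferred: recompute from the host mapping. */
pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
/* Alternative: rebase an existing GC value by the delta. */
RTGCPTR GCPtrNew = GCPtrOld + offDelta;
#endif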
859
860
861/**
862 * Updates the settings for the GC and R0 loggers.
863 *
864 * @returns VBox status code.
865 * @param pVM The VM handle.
866 */
867VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
868{
869 /*
870 * Simply clone the logger instance (for GC).
871 */
872 int rc = VINF_SUCCESS;
873 RTGCPTR32 GCPtrLoggerFlush = 0;
874
875 if (pVM->vmm.s.pLoggerHC
876#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
877 || pVM->vmm.s.pRelLoggerHC
878#endif
879 )
880 {
881 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
882 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
883 }
884
885 if (pVM->vmm.s.pLoggerHC)
886 {
887 RTGCPTR32 GCPtrLoggerWrapper = 0;
888 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
889 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
890 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
891 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
892 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
893 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
894 }
895
896#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
897 if (pVM->vmm.s.pRelLoggerHC)
898 {
899 RTGCPTR32 GCPtrLoggerWrapper = 0;
900 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
901 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
902 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
903 rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
904 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
905 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
906 }
907#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
908
909 /*
910 * For the ring-0 EMT logger, we use a per-thread logger
911 * instance in ring-0. Only initialize it once.
912 */
913 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
914 if (pR0Logger)
915 {
916 if (!pR0Logger->fCreated)
917 {
918 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
919 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
920 AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Vra\n", rc), rc);
921
922 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
923 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
924 AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc);
925
926 rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
927 *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
928 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
929 AssertReleaseMsgRCReturn(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc), rc);
930 pR0Logger->fCreated = true;
931 }
932
933 rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
934 AssertRC(rc);
935 }
936
937 return rc;
938}
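
/*
 * The GC half of the logger wiring above in brief: the ring-3 logger is
 * cloned into the hyper heap with GC-resolved wrapper and flush entry points
 * so that log statements executed in GC land in the same stream. All calls
 * appear in VMMR3UpdateLoggers above; error handling is omitted here.
 */
#if 0 /* illustrative sketch, not part of the build */
RTGCPTR32 GCPtrWrapper = 0;
RTGCPTR32 GCPtrFlush   = 0;
PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrWrapper);
PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush",   &GCPtrFlush);
int rcLog = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
                         GCPtrWrapper, GCPtrFlush, RTLOGFLAGS_BUFFERED);
#endif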
939
940
941/**
942 * Generic switch code relocator.
943 *
944 * @param pVM The VM handle.
945 * @param pSwitcher The switcher definition.
946 * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping.
947 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
948 * @param GCPtrCode The guest context address corresponding to pu8Code.
949 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
950 * @param SelCS The hypervisor CS selector.
951 * @param SelDS The hypervisor DS selector.
952 * @param SelTSS The hypervisor TSS selector.
953 * @param GCPtrGDT The GC address of the hypervisor GDT.
954 * @param SelCS64 The 64-bit mode hypervisor CS selector.
955 */
956static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
957 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
958{
959 union
960 {
961 const uint8_t *pu8;
962 const uint16_t *pu16;
963 const uint32_t *pu32;
964 const uint64_t *pu64;
965 const void *pv;
966 uintptr_t u;
967 } u;
968 u.pv = pSwitcher->pvFixups;
969
970 /*
971 * Process fixups.
972 */
973 uint8_t u8;
974 while ((u8 = *u.pu8++) != FIX_THE_END)
975 {
976 /*
977 * Get the source (where to write the fixup).
978 */
979 uint32_t offSrc = *u.pu32++;
980 Assert(offSrc < pSwitcher->cbCode);
981 union
982 {
983 uint8_t *pu8;
984 uint16_t *pu16;
985 uint32_t *pu32;
986 uint64_t *pu64;
987 uintptr_t u;
988 } uSrc;
989 uSrc.pu8 = pu8CodeR3 + offSrc;
990
991 /* The fixup target and method depends on the type. */
992 switch (u8)
993 {
994 /*
995 * 32-bit relative, source in HC and target in GC.
996 */
997 case FIX_HC_2_GC_NEAR_REL:
998 {
999 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1000 uint32_t offTrg = *u.pu32++;
1001 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1002 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
1003 break;
1004 }
1005
1006 /*
1007 * 32-bit relative, source in HC and target in ID.
1008 */
1009 case FIX_HC_2_ID_NEAR_REL:
1010 {
1011 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1012 uint32_t offTrg = *u.pu32++;
1013 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1014 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - ((uintptr_t)pu8CodeR0 + offSrc + 4));
1015 break;
1016 }
1017
1018 /*
1019 * 32-bit relative, source in GC and target in HC.
1020 */
1021 case FIX_GC_2_HC_NEAR_REL:
1022 {
1023 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1024 uint32_t offTrg = *u.pu32++;
1025 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1026 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
1027 break;
1028 }
1029
1030 /*
1031 * 32-bit relative, source in GC and target in ID.
1032 */
1033 case FIX_GC_2_ID_NEAR_REL:
1034 {
1035 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1036 uint32_t offTrg = *u.pu32++;
1037 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1038 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
1039 break;
1040 }
1041
1042 /*
1043 * 32-bit relative, source in ID and target in HC.
1044 */
1045 case FIX_ID_2_HC_NEAR_REL:
1046 {
1047 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1048 uint32_t offTrg = *u.pu32++;
1049 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1050 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
1051 break;
1052 }
1053
1054 /*
1055 * 32-bit relative, source in ID and target in GC.
1056 */
1057 case FIX_ID_2_GC_NEAR_REL:
1058 {
1059 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1060 uint32_t offTrg = *u.pu32++;
1061 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1062 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
1063 break;
1064 }
1065
1066 /*
1067 * 16:32 far jump, target in GC.
1068 */
1069 case FIX_GC_FAR32:
1070 {
1071 uint32_t offTrg = *u.pu32++;
1072 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1073 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
1074 *uSrc.pu16++ = SelCS;
1075 break;
1076 }
1077
1078 /*
1079 * Make 32-bit GC pointer given CPUM offset.
1080 */
1081 case FIX_GC_CPUM_OFF:
1082 {
1083 uint32_t offCPUM = *u.pu32++;
1084 Assert(offCPUM < sizeof(pVM->cpum));
1085 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
1086 break;
1087 }
1088
1089 /*
1090 * Make 32-bit GC pointer given VM offset.
1091 */
1092 case FIX_GC_VM_OFF:
1093 {
1094 uint32_t offVM = *u.pu32++;
1095 Assert(offVM < sizeof(VM));
1096 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
1097 break;
1098 }
1099
1100 /*
1101 * Make 32-bit HC pointer given CPUM offset.
1102 */
1103 case FIX_HC_CPUM_OFF:
1104 {
1105 uint32_t offCPUM = *u.pu32++;
1106 Assert(offCPUM < sizeof(pVM->cpum));
1107 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
1108 break;
1109 }
1110
1111 /*
1112 * Make 32-bit R0 pointer given VM offset.
1113 */
1114 case FIX_HC_VM_OFF:
1115 {
1116 uint32_t offVM = *u.pu32++;
1117 Assert(offVM < sizeof(VM));
1118 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
1119 break;
1120 }
1121
1122 /*
1123 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
1124 */
1125 case FIX_INTER_32BIT_CR3:
1126 {
1127
1128 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
1129 break;
1130 }
1131
1132 /*
1133 * Store the PAE CR3 (32-bit) for the intermediate memory context.
1134 */
1135 case FIX_INTER_PAE_CR3:
1136 {
1137
1138 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
1139 break;
1140 }
1141
1142 /*
1143 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
1144 */
1145 case FIX_INTER_AMD64_CR3:
1146 {
1147
1148 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
1149 break;
1150 }
1151
1152 /*
1153 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
1154 */
1155 case FIX_HYPER_32BIT_CR3:
1156 {
1157
1158 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
1159 break;
1160 }
1161
1162 /*
1163 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
1164 */
1165 case FIX_HYPER_PAE_CR3:
1166 {
1167
1168 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
1169 break;
1170 }
1171
1172 /*
1173 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
1174 */
1175 case FIX_HYPER_AMD64_CR3:
1176 {
1177
1178 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
1179 break;
1180 }
1181
1182 /*
1183 * Store Hypervisor CS (16-bit).
1184 */
1185 case FIX_HYPER_CS:
1186 {
1187 *uSrc.pu16 = SelCS;
1188 break;
1189 }
1190
1191 /*
1192 * Store Hypervisor DS (16-bit).
1193 */
1194 case FIX_HYPER_DS:
1195 {
1196 *uSrc.pu16 = SelDS;
1197 break;
1198 }
1199
1200 /*
1201 * Store Hypervisor TSS (16-bit).
1202 */
1203 case FIX_HYPER_TSS:
1204 {
1205 *uSrc.pu16 = SelTSS;
1206 break;
1207 }
1208
1209 /*
1210 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
1211 */
1212 case FIX_GC_TSS_GDTE_DW2:
1213 {
1214 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
1215 *uSrc.pu32 = (uint32_t)GCPtr;
1216 break;
1217 }
1218
1219
1220 ///@todo case FIX_CR4_MASK:
1221 ///@todo case FIX_CR4_OSFSXR:
1222
1223 /*
1224 * Insert relative jump to the specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
1225 */
1226 case FIX_NO_FXSAVE_JMP:
1227 {
1228 uint32_t offTrg = *u.pu32++;
1229 Assert(offTrg < pSwitcher->cbCode);
1230 if (!CPUMSupportsFXSR(pVM))
1231 {
1232 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1233 *uSrc.pu32++ = offTrg - (offSrc + 5);
1234 }
1235 else
1236 {
1237 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1238 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1239 }
1240 break;
1241 }
1242
1243 /*
1244 * Insert relative jump to the specified target if SYSENTER isn't used by the host.
1245 */
1246 case FIX_NO_SYSENTER_JMP:
1247 {
1248 uint32_t offTrg = *u.pu32++;
1249 Assert(offTrg < pSwitcher->cbCode);
1250 if (!CPUMIsHostUsingSysEnter(pVM))
1251 {
1252 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1253 *uSrc.pu32++ = offTrg - (offSrc + 5);
1254 }
1255 else
1256 {
1257 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1258 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1259 }
1260 break;
1261 }
1262
1263 /*
1264 * Insert relative jump to the specified target if SYSCALL isn't used by the host.
1265 */
1266 case FIX_NO_SYSCALL_JMP:
1267 {
1268 uint32_t offTrg = *u.pu32++;
1269 Assert(offTrg < pSwitcher->cbCode);
1270 if (!CPUMIsHostUsingSysCall(pVM))
1271 {
1272 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1273 *uSrc.pu32++ = offTrg - (offSrc + 5);
1274 }
1275 else
1276 {
1277 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1278 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1279 }
1280 break;
1281 }
1282
1283 /*
1284 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1285 */
1286 case FIX_HC_32BIT:
1287 {
1288 uint32_t offTrg = *u.pu32++;
1289 Assert(offSrc < pSwitcher->cbCode);
1290 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1291 *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
1292 break;
1293 }
1294
1295#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1296 /*
1297 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1298 */
1299 case FIX_HC_64BIT:
1300 {
1301 uint32_t offTrg = *u.pu32++;
1302 Assert(offSrc < pSwitcher->cbCode);
1303 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1304 *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
1305 break;
1306 }
1307
1308 /*
1309 * 64-bit HC Code Selector (no argument).
1310 */
1311 case FIX_HC_64BIT_CS:
1312 {
1313 Assert(offSrc < pSwitcher->cbCode);
1314#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1315 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
1316#else
1317 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
1318#endif
1319 break;
1320 }
1321
1322 /*
1323 * 64-bit HC pointer to the CPUM instance data (no argument).
1324 */
1325 case FIX_HC_64BIT_CPUM:
1326 {
1327 Assert(offSrc < pSwitcher->cbCode);
1328 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
1329 break;
1330 }
1331#endif
1332
1333 /*
1334 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
1335 */
1336 case FIX_ID_32BIT:
1337 {
1338 uint32_t offTrg = *u.pu32++;
1339 Assert(offSrc < pSwitcher->cbCode);
1340 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1341 *uSrc.pu32 = u32IDCode + offTrg;
1342 break;
1343 }
1344
1345 /*
1346 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
1347 */
1348 case FIX_ID_64BIT:
1349 {
1350 uint32_t offTrg = *u.pu32++;
1351 Assert(offSrc < pSwitcher->cbCode);
1352 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1353 *uSrc.pu64 = u32IDCode + offTrg;
1354 break;
1355 }
1356
1357 /*
1358 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
1359 */
1360 case FIX_ID_FAR32_TO_64BIT_MODE:
1361 {
1362 uint32_t offTrg = *u.pu32++;
1363 Assert(offSrc < pSwitcher->cbCode);
1364 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1365 *uSrc.pu32++ = u32IDCode + offTrg;
1366 *uSrc.pu16 = SelCS64;
1367 AssertRelease(SelCS64);
1368 break;
1369 }
1370
1371#ifdef VBOX_WITH_NMI
1372 /*
1373 * 32-bit address to the APIC base.
1374 */
1375 case FIX_GC_APIC_BASE_32BIT:
1376 {
1377 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
1378 break;
1379 }
1380#endif
1381
1382 default:
1383 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
1384 break;
1385 }
1386 }
1387
1388#ifdef LOG_ENABLED
1389 /*
1390 * If Log2 is enabled disassemble the switcher code.
1391 *
1392 * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
1393 */
1394 if (LogIs2Enabled())
1395 {
1396 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
1397 " pu8CodeR0 = %p\n"
1398 " pu8CodeR3 = %p\n"
1399 " GCPtrCode = %VGv\n"
1400 " u32IDCode = %08x\n"
1401 " pVMGC = %VGv\n"
1402 " pCPUMGC = %VGv\n"
1403 " pVMHC = %p\n"
1404 " pCPUMHC = %p\n"
1405 " GCPtrGDT = %VGv\n"
1406 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1407 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1408 " SelCS = %04x\n"
1409 " SelDS = %04x\n"
1410 " SelCS64 = %04x\n"
1411 " SelTSS = %04x\n",
1412 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
1413 pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
1414 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
1415 GCPtrGDT,
1416 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
1417 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
1418 SelCS, SelDS, SelCS64, SelTSS);
1419
1420 uint32_t offCode = 0;
1421 while (offCode < pSwitcher->cbCode)
1422 {
1423 /*
1424 * Figure out where this is.
1425 */
1426 const char *pszDesc = NULL;
1427 RTUINTPTR uBase;
1428 uint32_t cbCode;
1429 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
1430 {
1431 pszDesc = "HCCode0";
1432 uBase = (RTUINTPTR)pu8CodeR0;
1433 offCode = pSwitcher->offHCCode0;
1434 cbCode = pSwitcher->cbHCCode0;
1435 }
1436 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
1437 {
1438 pszDesc = "HCCode1";
1439 uBase = (RTUINTPTR)pu8CodeR0;
1440 offCode = pSwitcher->offHCCode1;
1441 cbCode = pSwitcher->cbHCCode1;
1442 }
1443 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
1444 {
1445 pszDesc = "GCCode";
1446 uBase = GCPtrCode;
1447 offCode = pSwitcher->offGCCode;
1448 cbCode = pSwitcher->cbGCCode;
1449 }
1450 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
1451 {
1452 pszDesc = "IDCode0";
1453 uBase = u32IDCode;
1454 offCode = pSwitcher->offIDCode0;
1455 cbCode = pSwitcher->cbIDCode0;
1456 }
1457 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
1458 {
1459 pszDesc = "IDCode1";
1460 uBase = u32IDCode;
1461 offCode = pSwitcher->offIDCode1;
1462 cbCode = pSwitcher->cbIDCode1;
1463 }
1464 else
1465 {
1466 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
1467 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1468 offCode++;
1469 continue;
1470 }
1471
1472 /*
1473 * Disassemble it.
1474 */
1475 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
1476 DISCPUSTATE Cpu;
1477
1478 memset(&Cpu, 0, sizeof(Cpu));
1479 Cpu.mode = CPUMODE_32BIT;
1480 while (cbCode > 0)
1481 {
1482 /* try to label it */
1483 if (pSwitcher->offR0HostToGuest == offCode)
1484 RTLogPrintf(" *R0HostToGuest:\n");
1485 if (pSwitcher->offGCGuestToHost == offCode)
1486 RTLogPrintf(" *GCGuestToHost:\n");
1487 if (pSwitcher->offGCCallTrampoline == offCode)
1488 RTLogPrintf(" *GCCallTrampoline:\n");
1489 if (pSwitcher->offGCGuestToHostAsm == offCode)
1490 RTLogPrintf(" *GCGuestToHostAsm:\n");
1491 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
1492 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
1493 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
1494 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
1495
1496 /* disas */
1497 uint32_t cbInstr = 0;
1498 char szDisas[256];
1499 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
1500 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
1501 else
1502 {
1503 RTLogPrintf(" %04x: %02x '%c'\n",
1504 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1505 cbInstr = 1;
1506 }
1507 offCode += cbInstr;
1508 cbCode -= RT_MIN(cbInstr, cbCode);
1509 }
1510 }
1511 }
1512#endif
1513}
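
/*
 * The fixup stream consumed above is a byte-packed sequence terminated by
 * FIX_THE_END: a one-byte record type, a 32-bit source offset into the
 * switcher code, and, for most types, a 32-bit target offset. A near-relative
 * fixup then becomes rel32 = target - (source + 4), the displacement measured
 * from the end of the 4-byte field. This sketch decodes a single
 * FIX_HC_2_GC_NEAR_REL record using the parameters of
 * vmmR3SwitcherGenericRelocate; it is not a complete decoder.
 */
#if 0 /* illustrative sketch, not part of the build */
const uint8_t *pb = (const uint8_t *)pSwitcher->pvFixups;
while (*pb != FIX_THE_END)
{
    uint8_t  u8Type = *pb++;
    uint32_t offSrc = *(const uint32_t *)pb; pb += sizeof(uint32_t);
    if (u8Type == FIX_HC_2_GC_NEAR_REL)
    {
        uint32_t  offTrg  = *(const uint32_t *)pb; pb += sizeof(uint32_t);
        uint32_t *pu32Src = (uint32_t *)(pu8CodeR3 + offSrc);
        *pu32Src = (uint32_t)((GCPtrCode + offTrg) - ((uintptr_t)pu32Src + 4));
    }
    else
        break; /* the real code above decodes every record type */
}
#endif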
1514
1515
1516/**
1517 * Relocator for the 32-Bit to 32-Bit world switcher.
1518 */
1519DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1520{
1521 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1522 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1523}
1524
1525
1526/**
1527 * Relocator for the 32-Bit to PAE world switcher.
1528 */
1529DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1530{
1531 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1532 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1533}
1534
1535
1536/**
1537 * Relocator for the PAE to 32-Bit world switcher.
1538 */
1539DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1540{
1541 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1542 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1543}
1544
1545
1546/**
1547 * Relocator for the PAE to PAE world switcher.
1548 */
1549DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1550{
1551 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1552 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1553}
1554
1555
1556/**
1557 * Relocator for the AMD64 to PAE world switcher.
1558 */
1559DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1560{
1561 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1562 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
1563}
1564
1565
1566/**
1567 * Gets the pointer to g_szRTAssertMsg1 in GC.
1568 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
1569 * Returns NULL if not present.
1570 * @param pVM The VM handle.
1571 */
1572VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
1573{
1574 RTGCPTR32 GCPtr;
1575 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
1576 if (VBOX_SUCCESS(rc))
1577 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1578 return NULL;
1579}
1580
1581
1582/**
1583 * Gets the pointer to g_szRTAssertMsg2 in GC.
1584 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
1585 * Returns NULL if not present.
1586 * @param pVM The VM handle.
1587 */
1588VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
1589{
1590 RTGCPTR32 GCPtr;
1591 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
1592 if (VBOX_SUCCESS(rc))
1593 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1594 return NULL;
1595}
1596
1597
1598/**
1599 * Execute state save operation.
1600 *
1601 * @returns VBox status code.
1602 * @param pVM VM Handle.
1603 * @param pSSM SSM operation handle.
1604 */
1605static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1606{
1607 LogFlow(("vmmR3Save:\n"));
1608
1609 /*
1610 * The hypervisor stack.
1611 */
1612 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1613 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1614 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1615 SSMR3PutGCPtr(pSSM, GCPtrESP);
1616 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1617 return SSMR3PutU32(pSSM, ~0); /* terminator */
1618}
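
/* A sketch of the saved state unit layout written above (for reference when
 * debugging saved states; the field names here are descriptive only, they
 * are not real symbols):
 * @code
 *      RTGCPTR     GCPtrStackBottom;        // pVM->vmm.s.pbGCStackBottom
 *      RTGCPTR     GCPtrESP;                // CPUMGetHyperESP(pVM)
 *      uint8_t     abStack[VMM_STACK_SIZE]; // raw copy of the hypervisor stack
 *      uint32_t    u32Terminator;           // ~0U
 * @endcode
 */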
1619
1620
1621/**
1622 * Execute state load operation.
1623 *
1624 * @returns VBox status code.
1625 * @param pVM VM Handle.
1626 * @param pSSM SSM operation handle.
1627 * @param u32Version Data layout version.
1628 */
1629static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1630{
1631 LogFlow(("vmmR3Load:\n"));
1632
1633 /*
1634 * Validate version.
1635 */
1636 if (u32Version != VMM_SAVED_STATE_VERSION)
1637 {
1638 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1639 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1640 }
1641
1642 /*
1643 * Check that the stack is in the same place, or that it's fairly empty.
1644 */
1645 RTGCPTR GCPtrStackBottom;
1646 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1647 RTGCPTR GCPtrESP;
1648 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1649 if (VBOX_FAILURE(rc))
1650 return rc;
1651 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1652 || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1653 {
1654 /*
1655 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1656 * the ESP in CPUM fatally invalid.
1657 */
1658 CPUMSetHyperESP(pVM, GCPtrESP);
1659
1660 /* restore the stack. */
1661 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1662
1663 /* terminator */
1664 uint32_t u32;
1665 rc = SSMR3GetU32(pSSM, &u32);
1666 if (VBOX_FAILURE(rc))
1667 return rc;
1668 if (u32 != ~0U)
1669 {
1670 AssertMsgFailed(("u32=%#x\n", u32));
1671 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1672 }
1673 return VINF_SUCCESS;
1674 }
1675
1676 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1677 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1678 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1679 return VINF_SUCCESS; /* ignore this */
1680 AssertFailed();
1681 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1682}
1683
1684
1685/**
1686 * Selects the switcher to be used for switching to GC.
1687 *
1688 * @returns VBox status code.
1689 * @param pVM VM handle.
1690 * @param enmSwitcher The new switcher.
1691 * @remark This function may be called before the VMM is initialized.
1692 */
1693VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1694{
1695 /*
1696 * Validate input.
1697 */
1698 if ( enmSwitcher < VMMSWITCHER_INVALID
1699 || enmSwitcher >= VMMSWITCHER_MAX)
1700 {
1701 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1702 return VERR_INVALID_PARAMETER;
1703 }
1704
1705 /* Do nothing if the switcher is disabled. */
1706 if (pVM->vmm.s.fSwitcherDisabled)
1707 return VINF_SUCCESS;
1708
1709 /*
1710 * Select the new switcher.
1711 */
1712 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1713 if (pSwitcher)
1714 {
1715 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1716 pVM->vmm.s.enmSwitcher = enmSwitcher;
1717
1718 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1719 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1720
1721 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1722 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1723 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1724 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1725 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1726 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1727 return VINF_SUCCESS;
1728 }
1729 return VERR_NOT_IMPLEMENTED;
1730}
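
/* Hypothetical usage sketch; assumes the 32-bit to PAE switcher enum value is
 * named VMMSWITCHER_32_TO_PAE (see the VMMSWITCHER enum for the real names):
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_32_TO_PAE);
 *      AssertRC(rc);
 * @endcode
 */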
1731
1732/**
1733 * Disables the switcher logic permanently.
1734 *
1735 * @returns VBox status code.
1736 * @param pVM VM handle.
1737 */
1738VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1739{
1740/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1741 * @code
1742 * mov eax, VERR_INTERNAL_ERROR
1743 * ret
1744 * @endcode
1745 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1746 */
1747 pVM->vmm.s.fSwitcherDisabled = true;
1748 return VINF_SUCCESS;
1749}
1750
1751
1752/**
1753 * Resolves a built-in GC symbol.
1754 * Called by PDM when loading or relocating GC modules.
1755 *
1756 * @returns VBox status code.
1757 * @param pVM VM Handle.
1758 * @param pszSymbol Symbol to resolve.
1759 * @param pGCPtrValue Where to store the symbol value.
1760 * @remark This has to work before VMMR3Relocate() is called.
1761 */
1762VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1763{
1764 if (!strcmp(pszSymbol, "g_Logger"))
1765 {
1766 if (pVM->vmm.s.pLoggerHC)
1767 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1768 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1769 }
1770 else if (!strcmp(pszSymbol, "g_RelLogger"))
1771 {
1772#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1773 if (pVM->vmm.s.pRelLoggerHC)
1774 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1775 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1776#else
1777 *pGCPtrValue = NIL_RTGCPTR;
1778#endif
1779 }
1780 else
1781 return VERR_SYMBOL_NOT_FOUND;
1782 return VINF_SUCCESS;
1783}
1784
1785
1786/**
1787 * Suspends the CPU yielder.
1788 *
1789 * @param pVM The VM handle.
1790 */
1791VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1792{
1793 if (!pVM->vmm.s.cYieldResumeMillies)
1794 {
1795 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1796 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1797 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1798 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1799 else
1800 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1801 TMTimerStop(pVM->vmm.s.pYieldTimer);
1802 }
1803 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1804}
1805
1806
1807/**
1808 * Stops the CPU yielder.
1809 *
1810 * @param pVM The VM handle.
1811 */
1812VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1813{
1814 if (!pVM->vmm.s.cYieldResumeMillies)
1815 TMTimerStop(pVM->vmm.s.pYieldTimer);
1816 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1817 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1818}
1819
1820
1821/**
1822 * Resumes the CPU yielder when it has been suspended or stopped.
1823 *
1824 * @param pVM The VM handle.
1825 */
1826VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1827{
1828 if (pVM->vmm.s.cYieldResumeMillies)
1829 {
1830 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1831 pVM->vmm.s.cYieldResumeMillies = 0;
1832 }
1833}
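
/* Typical pairing of the yielder calls, sketched for a hypothetical EMT
 * caller; suspend parks the timer and remembers the remaining interval,
 * resume rearms the timer with cYieldResumeMillies:
 * @code
 *      VMMR3YieldSuspend(pVM);
 *      // ... work during which the EMT shouldn't yield ...
 *      VMMR3YieldResume(pVM);
 * @endcode
 */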
1834
1835
1836/**
1837 * Internal timer callback function.
1838 *
1839 * @param pVM The VM.
1840 * @param pTimer The timer handle.
1841 * @param pvUser User argument specified upon timer creation.
1842 */
1843static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1844{
1845 /*
1846 * This really needs some careful tuning. While we shouldn't be too greedy since
1847 * that'll cause the rest of the system to stall, we shouldn't be too nice either
1848 * because that'll cause us to stall.
1849 *
1850 * The current logic is to use the default interval when there is no lag worth
1851 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1852 *
1853 * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
1854 * so the lag is up to date.)
1855 */
1856 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1857 if ( u64Lag < 50000000 /* 50ms */
1858 || ( u64Lag < 1000000000 /* 1s */
1859 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1860 )
1861 {
1862 uint64_t u64Elapsed = RTTimeNanoTS();
1863 pVM->vmm.s.u64LastYield = u64Elapsed;
1864
1865 RTThreadYield();
1866
1867#ifdef LOG_ENABLED
1868 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1869 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1870#endif
1871 }
1872 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1873}
1874
1875
1876/**
1877 * Acquire global VM lock.
1878 *
1879 * @returns VBox status code
1880 * @param pVM The VM to operate on.
1881 */
1882VMMR3DECL(int) VMMR3Lock(PVM pVM)
1883{
1884 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1885}
1886
1887
1888/**
1889 * Release global VM lock.
1890 *
1891 * @returns VBox status code
1892 * @param pVM The VM to operate on.
1893 */
1894VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1895{
1896 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1897}
1898
1899
1900/**
1901 * Return global VM lock owner.
1902 *
1903 * @returns Thread id of owner.
1904 * @returns NIL_RTTHREAD if no owner.
1905 * @param pVM The VM to operate on.
1906 */
1907VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1908{
1909 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1910}
1911
1912
1913/**
1914 * Checks if the current thread is the owner of the global VM lock.
1915 *
1916 * @returns true if owner.
1917 * @returns false if not owner.
1918 * @param pVM The VM to operate on.
1919 */
1920VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1921{
1922 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1923}
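
/* Sketch of how a hypothetical caller would use the global VM lock API
 * defined above:
 * @code
 *      int rc = VMMR3Lock(pVM);
 *      AssertRC(rc);
 *      Assert(VMMR3LockIsOwner(pVM));
 *      // ... access state protected by the global lock ...
 *      VMMR3Unlock(pVM);
 * @endcode
 */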
1924
1925
1926/**
1927 * Executes guest code.
1928 *
1929 * @param pVM VM handle.
1930 */
1931VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1932{
1933 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1934
1935 /*
1936 * Set the EIP and ESP.
1937 */
1938 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1939 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1940 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1941 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1942
1943 /*
1944 * We hide log flushes (outer) and hypervisor interrupts (inner).
1945 */
1946 for (;;)
1947 {
1948 int rc;
1949 do
1950 {
1951#ifdef NO_SUPCALLR0VMM
1952 rc = VERR_GENERAL_FAILURE;
1953#else
1954 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
1955 if (RT_LIKELY(rc == VINF_SUCCESS))
1956 rc = pVM->vmm.s.iLastGCRc;
1957#endif
1958 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1959
1960 /*
1961 * Flush the logs.
1962 */
1963#ifdef LOG_ENABLED
1964 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
1965 if ( pLogger
1966 && pLogger->offScratch > 0)
1967 RTLogFlushGC(NULL, pLogger);
1968#endif
1969#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1970 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1971 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1972 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1973#endif
1974 if (rc != VINF_VMM_CALL_HOST)
1975 {
1976 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1977 return rc;
1978 }
1979 rc = vmmR3ServiceCallHostRequest(pVM);
1980 if (VBOX_FAILURE(rc))
1981 return rc;
1982 /* Resume GC */
1983 }
1984}
1985
1986
1987/**
1988 * Executes guest code (Intel VT-x and AMD-V).
1989 *
1990 * @param pVM VM handle.
1991 */
1992VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1993{
1994 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1995
1996 for (;;)
1997 {
1998 int rc;
1999 do
2000 {
2001#ifdef NO_SUPCALLR0VMM
2002 rc = VERR_GENERAL_FAILURE;
2003#else
2004 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
2005 if (RT_LIKELY(rc == VINF_SUCCESS))
2006 rc = pVM->vmm.s.iLastGCRc;
2007#endif
2008 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2009
2010#ifdef LOG_ENABLED
2011 /*
2012 * Flush the log
2013 */
2014 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
2015 if ( pR0Logger
2016 && pR0Logger->Logger.offScratch > 0)
2017 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
2018#endif /* LOG_ENABLED */
2019 if (rc != VINF_VMM_CALL_HOST)
2020 {
2021 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2022 return rc;
2023 }
2024 rc = vmmR3ServiceCallHostRequest(pVM);
2025 if (VBOX_FAILURE(rc) || rc == VINF_EM_DBG_HYPER_ASSERTION)
2026 return rc;
2027 /* Resume R0 */
2028 }
2029}
2030
2031/**
2032 * Calls a GC function.
2033 *
2034 * @param pVM The VM handle.
2035 * @param GCPtrEntry The GC function address.
2036 * @param cArgs The number of arguments in the ellipsis.
2037 * @param ... Arguments to the function.
2038 */
2039VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
2040{
2041 va_list args;
2042 va_start(args, cArgs);
2043 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
2044 va_end(args);
2045 return rc;
2046}
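
/* Hypothetical call sketch (the symbol name and argument values are made up);
 * the symbol lookup mirrors how PDMR3GetSymbolGC() is used elsewhere in this
 * file:
 * @code
 *      RTGCPTR32 GCPtr;
 *      int rc = PDMR3GetSymbolGC(pVM, NULL, "SomeGCFunction", &GCPtr);
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3CallGC(pVM, (RTGCPTR)GCPtr, 2, (RTGCUINTPTR)1, (RTGCUINTPTR)2);
 * @endcode
 */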
2047
2048
2049/**
2050 * Calls a GC function.
2051 *
2052 * @param pVM The VM handle.
2053 * @param GCPtrEntry The GC function address.
2054 * @param cArgs The number of arguments in the argument list.
2055 * @param args Arguments to the function.
2056 */
2057VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
2058{
2059 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
2060
2061 /*
2062 * Setup the call frame using the trampoline.
2063 */
2064 CPUMHyperSetCtxCore(pVM, NULL);
2065 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Poison the stack. */
2066 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
2067 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
2068 int i = cArgs;
2069 while (i-- > 0)
2070 *pFrame++ = va_arg(args, RTGCUINTPTR);
2071
2072 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
2073 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
2074 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2075
2076 /*
2077 * We hide log flushes (outer) and hypervisor interrupts (inner).
2078 */
2079 for (;;)
2080 {
2081 int rc;
2082 do
2083 {
2084#ifdef NO_SUPCALLR0VMM
2085 rc = VERR_GENERAL_FAILURE;
2086#else
2087 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
2088 if (RT_LIKELY(rc == VINF_SUCCESS))
2089 rc = pVM->vmm.s.iLastGCRc;
2090#endif
2091 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2092
2093 /*
2094 * Flush the logs.
2095 */
2096#ifdef LOG_ENABLED
2097 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
2098 if ( pLogger
2099 && pLogger->offScratch > 0)
2100 RTLogFlushGC(NULL, pLogger);
2101#endif
2102#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2103 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2104 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2105 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2106#endif
2107 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2108 VMMR3FatalDump(pVM, rc);
2109 if (rc != VINF_VMM_CALL_HOST)
2110 {
2111 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2112 return rc;
2113 }
2114 rc = vmmR3ServiceCallHostRequest(pVM);
2115 if (VBOX_FAILURE(rc))
2116 return rc;
2117 }
2118}
2119
2120
2121/**
2122 * Resumes executing hypervisor code when interrupted
2123 * by a queue flush or a debug event.
2124 *
2125 * @returns VBox status code.
2126 * @param pVM VM handle.
2127 */
2128VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2129{
2130 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2131
2132 /*
2133 * We hide log flushes (outer) and hypervisor interrupts (inner).
2134 */
2135 for (;;)
2136 {
2137 int rc;
2138 do
2139 {
2140#ifdef NO_SUPCALLR0VMM
2141 rc = VERR_GENERAL_FAILURE;
2142#else
2143 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
2144 if (RT_LIKELY(rc == VINF_SUCCESS))
2145 rc = pVM->vmm.s.iLastGCRc;
2146#endif
2147 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2148
2149 /*
2150 * Flush the loggers.
2151 */
2152#ifdef LOG_ENABLED
2153 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
2154 if ( pLogger
2155 && pLogger->offScratch > 0)
2156 RTLogFlushGC(NULL, pLogger);
2157#endif
2158#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2159 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2160 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2161 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2162#endif
2163 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2164 VMMR3FatalDump(pVM, rc);
2165 if (rc != VINF_VMM_CALL_HOST)
2166 {
2167 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2168 return rc;
2169 }
2170 rc = vmmR3ServiceCallHostRequest(pVM);
2171 if (VBOX_FAILURE(rc))
2172 return rc;
2173 }
2174}
2175
2176
2177/**
2178 * Service a call to the ring-3 host code.
2179 *
2180 * @returns VBox status code.
2181 * @param pVM VM handle.
2182 * @remark Careful with critsects.
2183 */
2184static int vmmR3ServiceCallHostRequest(PVM pVM)
2185{
2186 switch (pVM->vmm.s.enmCallHostOperation)
2187 {
2188 /*
2189 * Acquire the PDM lock.
2190 */
2191 case VMMCALLHOST_PDM_LOCK:
2192 {
2193 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2194 break;
2195 }
2196
2197 /*
2198 * Flush a PDM queue.
2199 */
2200 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2201 {
2202 PDMR3QueueFlushWorker(pVM, NULL);
2203 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2204 break;
2205 }
2206
2207 /*
2208 * Grow the PGM pool.
2209 */
2210 case VMMCALLHOST_PGM_POOL_GROW:
2211 {
2212 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2213 break;
2214 }
2215
2216 /*
2217 * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2218 */
2219 case VMMCALLHOST_PGM_MAP_CHUNK:
2220 {
2221 pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
2222 break;
2223 }
2224
2225 /*
2226 * Allocates more handy pages.
2227 */
2228 case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
2229 {
2230 pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
2231 break;
2232 }
2233#ifndef VBOX_WITH_NEW_PHYS_CODE
2234
2235 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2236 {
2237 const RTGCPHYS GCPhys = pVM->vmm.s.u64CallHostArg;
2238 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, &GCPhys);
2239 break;
2240 }
2241#endif
2242
2243 /*
2244 * Acquire the PGM lock.
2245 */
2246 case VMMCALLHOST_PGM_LOCK:
2247 {
2248 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2249 break;
2250 }
2251
2252 /*
2253 * Flush REM handler notifications.
2254 */
2255 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2256 {
2257 REMR3ReplayHandlerNotifications(pVM);
2258 break;
2259 }
2260
2261 /*
2262 * This is a noop. We just take this route to avoid unnecessary
2263 * tests in the loops.
2264 */
2265 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2266 break;
2267
2268 /*
2269 * Set the VM error message.
2270 */
2271 case VMMCALLHOST_VM_SET_ERROR:
2272 VMR3SetErrorWorker(pVM);
2273 break;
2274
2275 /*
2276 * Set the VM runtime error message.
2277 */
2278 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2279 VMR3SetRuntimeErrorWorker(pVM);
2280 break;
2281
2282 /*
2283 * Signal a ring-0 hypervisor assertion.
2284 * Cancel the longjmp operation that's in progress.
2285 */
2286 case VMMCALLHOST_VM_R0_HYPER_ASSERTION:
2287 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2288 pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
2289#ifdef RT_ARCH_X86
2290 pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
2291#else
2292 pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
2293#endif
2294 return VINF_EM_DBG_HYPER_ASSERTION;
2295
2296 default:
2297 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2298 return VERR_INTERNAL_ERROR;
2299 }
2300
2301 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2302 return VINF_SUCCESS;
2303}
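
/* The other half of this protocol, sketched (simplified and hypothetical; the
 * real code lives on the GC/R0 side): the caller stores the operation and
 * argument in the shared VM structure, returns VINF_VMM_CALL_HOST so that the
 * outer ring-3 loop ends up here, and then picks up the result:
 * @code
 *      pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_PGM_LOCK;
 *      pVM->vmm.s.u64CallHostArg       = 0;
 *      // ... world switch / longjmp to ring-3, which runs the loop above ...
 *      int rc = pVM->vmm.s.rcCallHost;
 * @endcode
 */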
2304
2305
2306
2307/**
2308 * Structure to pass to DBGFR3Info() and for doing all other
2309 * output during fatal dump.
2310 */
2311typedef struct VMMR3FATALDUMPINFOHLP
2312{
2313 /** The helper core. */
2314 DBGFINFOHLP Core;
2315 /** The release logger instance. */
2316 PRTLOGGER pRelLogger;
2317 /** The saved release logger flags. */
2318 RTUINT fRelLoggerFlags;
2319 /** The logger instance. */
2320 PRTLOGGER pLogger;
2321 /** The saved logger flags. */
2322 RTUINT fLoggerFlags;
2323 /** The saved logger destination flags. */
2324 RTUINT fLoggerDestFlags;
2325 /** Whether to output to stderr or not. */
2326 bool fStdErr;
2327} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2328typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2329
2330
2331/**
2332 * Print formatted string.
2333 *
2334 * @param pHlp Pointer to this structure.
2335 * @param pszFormat The format string.
2336 * @param ... Arguments.
2337 */
2338static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2339{
2340 va_list args;
2341 va_start(args, pszFormat);
2342 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2343 va_end(args);
2344}
2345
2346
2347/**
2348 * Print formatted string.
2349 *
2350 * @param pHlp Pointer to this structure.
2351 * @param pszFormat The format string.
2352 * @param args Argument list.
2353 */
2354static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2355{
2356 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2357
2358 if (pMyHlp->pRelLogger)
2359 {
2360 va_list args2;
2361 va_copy(args2, args);
2362 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2363 va_end(args2);
2364 }
2365 if (pMyHlp->pLogger)
2366 {
2367 va_list args2;
2368 va_copy(args2, args);
2369 RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2);
2370 va_end(args2);
2371 }
2372 if (pMyHlp->fStdErr)
2373 {
2374 va_list args2;
2375 va_copy(args2, args);
2376 RTStrmPrintfV(g_pStdErr, pszFormat, args2);
2377 va_end(args2);
2378 }
2379}
2380
2381
2382/**
2383 * Initializes the fatal dump output helper.
2384 *
2385 * @param pHlp The structure to initialize.
2386 */
2387static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2388{
2389 memset(pHlp, 0, sizeof(*pHlp));
2390
2391 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2392 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2393
2394 /*
2395 * The loggers.
2396 */
2397 pHlp->pRelLogger = RTLogRelDefaultInstance();
2398#ifndef LOG_ENABLED
2399 if (!pHlp->pRelLogger)
2400#endif
2401 pHlp->pLogger = RTLogDefaultInstance();
2402
2403 if (pHlp->pRelLogger)
2404 {
2405 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2406 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2407 }
2408
2409 if (pHlp->pLogger)
2410 {
2411 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2412 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2413 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2414#ifndef DEBUG_sandervl
2415 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2416#endif
2417 }
2418
2419 /*
2420 * Check if we need to write to stderr.
2421 */
2422#ifdef DEBUG_sandervl
2423 pHlp->fStdErr = false; /* takes too long to display here */
2424#else
2425 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2426 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2427#endif
2428}
2429
2430
2431/**
2432 * Deletes the fatal dump output helper.
2433 *
2434 * @param pHlp The structure to delete.
2435 */
2436static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2437{
2438 if (pHlp->pRelLogger)
2439 {
2440 RTLogFlush(pHlp->pRelLogger);
2441 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2442 }
2443
2444 if (pHlp->pLogger)
2445 {
2446 RTLogFlush(pHlp->pLogger);
2447 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2448 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2449 }
2450}
2451
2452
2453/**
2454 * Dumps the VM state on a fatal error.
2455 *
2456 * @param pVM VM Handle.
2457 * @param rcErr VBox status code.
2458 */
2459VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2460{
2461 /*
2462 * Create our output helper and sync it with the log settings.
2463 * This helper will be used for all the output.
2464 */
2465 VMMR3FATALDUMPINFOHLP Hlp;
2466 PCDBGFINFOHLP pHlp = &Hlp.Core;
2467 vmmR3FatalDumpInfoHlpInit(&Hlp);
2468
2469 /*
2470 * Header.
2471 */
2472 pHlp->pfnPrintf(pHlp,
2473 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2474 "!!\n"
2475 "!! Guru Meditation %d (%Vrc)\n"
2476 "!!\n",
2477 rcErr, rcErr);
2478
2479 /*
2480 * Continue according to context.
2481 */
2482 bool fDoneHyper = false;
2483 switch (rcErr)
2484 {
2485 /*
2486 * Hypervisor errors.
2487 */
2488 case VINF_EM_DBG_HYPER_ASSERTION:
2489 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2490 /* fall thru */
2491 case VERR_TRPM_DONT_PANIC:
2492 case VERR_TRPM_PANIC:
2493 case VINF_EM_RAW_STALE_SELECTOR:
2494 case VINF_EM_RAW_IRET_TRAP:
2495 case VINF_EM_DBG_HYPER_BREAKPOINT:
2496 case VINF_EM_DBG_HYPER_STEPPED:
2497 {
2498 /* Trap? */
2499 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2500 TRPMEVENT enmType;
2501 uint8_t u8TrapNo = 0xce;
2502 RTGCUINT uErrorCode = 0xdeadface;
2503 RTGCUINTPTR uCR2 = 0xdeadface;
2504 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
2505 if (VBOX_SUCCESS(rc2))
2506 pHlp->pfnPrintf(pHlp,
2507 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv Type=%d\n",
2508 u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
2509 else
2510 pHlp->pfnPrintf(pHlp,
2511 "!! EIP=%VGv NOTRAP\n",
2512 uEIP);
2513
2514 /*
2515 * Try to figure out where eip is.
2516 */
2517 /** @todo make query call for core code or move this function to VMM. */
2518 /* core code? */
2519 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2520 // pHlp->pfnPrintf(pHlp,
2521 // "!! EIP is in CoreCode, offset %#x\n",
2522 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2523 //else
2524 { /* ask PDM */
2525 /** @todo ask DBGFR3Sym later. */
2526 char szModName[64];
2527 RTGCPTR GCPtrMod;
2528 char szNearSym1[260];
2529 RTGCPTR GCPtrNearSym1;
2530 char szNearSym2[260];
2531 RTGCPTR GCPtrNearSym2;
2532 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2533 &szModName[0], sizeof(szModName), &GCPtrMod,
2534 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2535 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2536 if (VBOX_SUCCESS(rc))
2537 {
2538 pHlp->pfnPrintf(pHlp,
2539 "!! EIP in %s (%VGv) at rva %x near symbols:\n"
2540 "!! %VGv rva %VGv off %08x %s\n"
2541 "!! %VGv rva %VGv off -%08x %s\n",
2542 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2543 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2544 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2545 }
2546 else
2547 pHlp->pfnPrintf(pHlp,
2548 "!! EIP is not in any code known to VMM!\n");
2549 }
2550
2551 /* Disassemble the instruction. */
2552 char szInstr[256];
2553 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2554 if (VBOX_SUCCESS(rc2))
2555 pHlp->pfnPrintf(pHlp,
2556 "!! %s\n", szInstr);
2557
2558 /* Dump the hypervisor cpu state. */
2559 pHlp->pfnPrintf(pHlp,
2560 "!!\n"
2561 "!!\n"
2562 "!!\n");
2563 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2564 fDoneHyper = true;
2565
2566 /* Callstack. */
2567 DBGFSTACKFRAME Frame = {0};
2568 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2569 if (VBOX_SUCCESS(rc2))
2570 {
2571 pHlp->pfnPrintf(pHlp,
2572 "!!\n"
2573 "!! Call Stack:\n"
2574 "!!\n"
2575 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2576 do
2577 {
2578 pHlp->pfnPrintf(pHlp,
2579 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2580 (uint32_t)Frame.AddrFrame.off,
2581 (uint32_t)Frame.AddrReturnFrame.off,
2582 (uint32_t)Frame.AddrReturnPC.Sel,
2583 (uint32_t)Frame.AddrReturnPC.off,
2584 Frame.Args.au32[0],
2585 Frame.Args.au32[1],
2586 Frame.Args.au32[2],
2587 Frame.Args.au32[3]);
2588 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2589 if (Frame.pSymPC)
2590 {
2591 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2592 if (offDisp > 0)
2593 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2594 else if (offDisp < 0)
2595 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2596 else
2597 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2598 }
2599 if (Frame.pLinePC)
2600 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2601 pHlp->pfnPrintf(pHlp, "\n");
2602
2603 /* next */
2604 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2605 } while (VBOX_SUCCESS(rc2));
2606 DBGFR3StackWalkEnd(pVM, &Frame);
2607 }
2608
2609 /* raw stack */
2610 pHlp->pfnPrintf(pHlp,
2611 "!!\n"
2612 "!! Raw stack (mind the direction).\n"
2613 "!!\n"
2614 "%.*Vhxd\n",
2615 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2616 break;
2617 }
2618
2619 default:
2620 {
2621 break;
2622 }
2623
2624 } /* switch (rcErr) */
2625
2626
2627 /*
2628 * Generic info dumper loop.
2629 */
2630 static struct
2631 {
2632 const char *pszInfo;
2633 const char *pszArgs;
2634 } const aInfo[] =
2635 {
2636 { "mappings", NULL },
2637 { "hma", NULL },
2638 { "cpumguest", "verbose" },
2639 { "cpumguestinstr", "verbose" },
2640 { "cpumhyper", "verbose" },
2641 { "cpumhost", "verbose" },
2642 { "mode", "all" },
2643 { "cpuid", "verbose" },
2644 { "gdt", NULL },
2645 { "ldt", NULL },
2646 //{ "tss", NULL },
2647 { "ioport", NULL },
2648 { "mmio", NULL },
2649 { "phys", NULL },
2650 //{ "pgmpd", NULL }, - doesn't always work at init time...
2651 { "timers", NULL },
2652 { "activetimers", NULL },
2653 { "handlers", "phys virt hyper stats" },
2654 { "cfgm", NULL },
2655 };
2656 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2657 {
2658 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2659 continue;
2660 pHlp->pfnPrintf(pHlp,
2661 "!!\n"
2662 "!! {%s, %s}\n"
2663 "!!\n",
2664 aInfo[i].pszInfo, aInfo[i].pszArgs);
2665 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2666 }
2667
2668 /* done */
2669 pHlp->pfnPrintf(pHlp,
2670 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2671
2672
2673 /*
2674 * Delete the output instance (flushing and restoring of flags).
2675 */
2676 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2677}
2678
2679
2680
2681/**
2682 * Displays the forced action flags.
2683 *
2684 * @param pVM The VM handle.
2685 * @param pHlp The output helpers.
2686 * @param pszArgs The additional arguments (ignored).
2687 */
2688static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2689{
2690 const uint32_t fForcedActions = pVM->fForcedActions;
2691
2692 pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
2693
2694 /* show the flag mnemonics */
2695 int c = 0;
2696 uint32_t f = fForcedActions;
2697#define PRINT_FLAG(flag) do { \
2698 if (f & (flag)) \
2699 { \
2700 static const char *s_psz = #flag; \
2701 if (!(c % 6)) \
2702 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz + 6); \
2703 else \
2704 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2705 c++; \
2706 f &= ~(flag); \
2707 } \
2708 } while (0)
2709 PRINT_FLAG(VM_FF_INTERRUPT_APIC);
2710 PRINT_FLAG(VM_FF_INTERRUPT_PIC);
2711 PRINT_FLAG(VM_FF_TIMER);
2712 PRINT_FLAG(VM_FF_PDM_QUEUES);
2713 PRINT_FLAG(VM_FF_PDM_DMA);
2714 PRINT_FLAG(VM_FF_PDM_CRITSECT);
2715 PRINT_FLAG(VM_FF_DBGF);
2716 PRINT_FLAG(VM_FF_REQUEST);
2717 PRINT_FLAG(VM_FF_TERMINATE);
2718 PRINT_FLAG(VM_FF_RESET);
2719 PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
2720 PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
2721 PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
2722 PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
2723 PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
2724 PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
2725 PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
2726 PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
2727 PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
2728 PRINT_FLAG(VM_FF_TO_R3);
2729 PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
2730 if (f)
2731 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2732 else
2733 pHlp->pfnPrintf(pHlp, "\n");
2734#undef PRINT_FLAG
2735
2736 /* the groups */
2737 c = 0;
2738#define PRINT_GROUP(grp) do { \
2739 if (fForcedActions & (grp)) \
2740 { \
2741 static const char *s_psz = #grp; \
2742 if (!(c % 5)) \
2743 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
2744 else \
2745 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2746 c++; \
2747 } \
2748 } while (0)
2749 PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
2750 PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
2751 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
2752 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
2753 PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
2754 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
2755 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
2756 PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
2757 PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
2758 if (c)
2759 pHlp->pfnPrintf(pHlp, "\n");
2760#undef PRINT_GROUP
2761}
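
/* Illustrative output of this info handler (the flag values and the selection
 * of set flags are made up; real output depends on the VM state):
 * @code
 *      Forced action Flags: 0x110
 *       TIMER, REQUEST
 *      Groups:
 *       EXTERNAL_HALTED_MASK, NORMAL_PRIORITY_MASK
 * @endcode
 */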
2762