VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp @ 3254

Last change on this file since 3254 was 3123, checked in by vboxsync, 17 years ago:
Made VBOX_LOG_FLAGS=msprog work in GC.

1/* $Id: VMM.cpp 3123 2007-06-15 14:46:16Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22//#define NO_SUPCALLR0VMM
23
24/** @page pg_vmm VMM - The Virtual Machine Monitor
25 *
26 * !Revise this! It's already incorrect!
27 *
28 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
29 * manages the alternate reality; controlling the virtualization, managing
30 * resources, tracking CPU state, its resources and so on...
31 *
32 * We will split the VMM into smaller entities:
33 *
34 * - Virtual Machine Core Monitor (VMCM), whose purpose is to
35 * provide ring and world switching, including routing
36 * interrupts to the host OS and traps to the appropriate trap
37 * handlers. It will implement an external interface for
38 * managing trap handlers.
39 *
40 * - CPU Monitor (CM), tracking the state of the CPU (in the alternate
41 * reality) and implementing external interfaces to read and change
42 * the state.
43 *
44 * - Memory Monitor (MM), whose purpose is to virtualize physical
45 * pages, segment descriptor tables, interrupt descriptor tables, task
46 * segments, and keep track of all memory providing external interfaces
47 * to access content and map pages. (Internally split into smaller entities!)
48 *
49 * - IO Monitor (IOM), which virtualizes in and out I/O operations. It
50 * interacts with the MM to implement memory mapped I/O. External
51 * interfaces for adding and removing I/O ranges are implemented.
52 *
53 * - External Interrupt Monitor (EIM), whose purpose is to manage
54 * interrupts generated by virtual devices. This monitor provides
55 * an interface for raising interrupts which is accessible at any
56 * time and from all threads.
57 * <p>
58 * A subentity of the EIM is the virtual Programmable Interrupt
59 * Controller Device (VPICD), and perhaps a virtual I/O Advanced
60 * Programmable Interrupt Controller Device (VAPICD).
61 *
62 * - Direct Memory Access Monitor (DMAM), whose purpose is to support
63 * virtual devices using the DMA controller. Like the EIM interfaces,
64 * these interfaces must be independent and threadable.
65 * <p>
66 * A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
67 *
68 *
69 * Entities working on a higher level:
70 *
71 * - Device Manager (DM), which is a support facility for virtualized
72 * hardware. This provides generic facilities for efficient device
73 * virtualization. It will manage device attaching and detaching,
74 * conversing with the EIM and IOM.
75 *
76 * - Debugger Facility (DBGF) provides the basic features for
77 * debugging the alternate reality execution.
78 *
79 *
80 *
81 * @section pg_vmm_s_use_cases Use Cases
82 *
83 * @subsection pg_vmm_s_use_case_boot Bootstrap
84 *
85 * - Basic Init:
86 * - Init SUPDRV.
87 *
88 * - Init Virtual Machine Instance:
89 * - Load settings.
90 * - Check resource requirements (memory, com, stuff).
91 *
92 * - Init Host Ring 3 part:
93 * - Init Core code.
94 * - Load Pluggable Components.
95 * - Init Pluggable Components.
96 *
97 * - Init Host Ring 0 part:
98 * - Load Core (core = core components like VMM, RMI, CA, and so on) code.
99 * - Init Core code.
100 * - Load Pluggable Component code.
101 * - Init Pluggable Component code.
102 *
103 * - Allocate first chunk of memory and pin it down. This block of memory
104 * will fit the following pieces:
105 * - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
106 * (This is available from everywhere (at different addresses though)).
107 * - VMM Guest Context code.
108 * - Pluggable devices Guest Context code.
109 * - Page tables (directory and everything) for the VMM Guest
110 *
111 * - Setup Guest (Ring 0) part:
112 * - Setup initial page tables (i.e. directory all the stuff).
113 * - Load Core Guest Context code.
114 * - Load Pluggable Devices Guest Context code.
115 *
116 *
117 */
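/*
 * Rough sketch of how the entry points below are driven (assuming the usual
 * VM startup sequence; the exact caller is the VM creation code, which is
 * not part of this file):
 *
 *      VMMR3Init(pVM);          // ring-3 init: core code, stack, loggers, stats
 *      VMMR3InitFinalize(pVM);  // at ring-3 init completion: stack protection, yield timer
 *      VMMR3InitR0(pVM);        // ring-0 init via VMMR0_DO_VMMR0_INIT
 *      VMMR3InitGC(pVM);        // guest context init via VMMGC_DO_VMMGC_INIT
 */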
118
119
120/*******************************************************************************
121* Header Files *
122*******************************************************************************/
123#define LOG_GROUP LOG_GROUP_VMM
124#include <VBox/vmm.h>
125#include <VBox/vmapi.h>
126#include <VBox/pgm.h>
127#include <VBox/cfgm.h>
128#include <VBox/pdm.h>
129#include <VBox/cpum.h>
130#include <VBox/mm.h>
131#include <VBox/iom.h>
132#include <VBox/trpm.h>
133#include <VBox/selm.h>
134#include <VBox/em.h>
135#include <VBox/sup.h>
136#include <VBox/dbgf.h>
137#include <VBox/csam.h>
138#include <VBox/patm.h>
139#include <VBox/rem.h>
140#include <VBox/ssm.h>
141#include <VBox/tm.h>
142#include "VMMInternal.h"
143#include "VMMSwitcher/VMMSwitcher.h"
144#include <VBox/vm.h>
145#include <VBox/err.h>
146#include <VBox/param.h>
147#include <VBox/version.h>
148#include <VBox/x86.h>
149#include <VBox/hwaccm.h>
150#include <iprt/assert.h>
151#include <iprt/alloc.h>
152#include <iprt/asm.h>
153#include <iprt/time.h>
154#include <iprt/stream.h>
155#include <iprt/string.h>
156#include <iprt/stdarg.h>
157#include <iprt/ctype.h>
158
159
160
161/** The saved state version. */
162#define VMM_SAVED_STATE_VERSION 3
163
164
165/*******************************************************************************
166* Internal Functions *
167*******************************************************************************/
168static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
169static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
170static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
171static int vmmR3ServiceCallHostRequest(PVM pVM);
172
173
174/*******************************************************************************
175* Global Variables *
176*******************************************************************************/
177/** Array of switcher definitions.
178 * The type and index shall match!
179 */
180static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
181{
182 NULL, /* invalid entry */
183#ifndef __AMD64__
184 &vmmR3Switcher32BitTo32Bit_Def,
185 &vmmR3Switcher32BitToPAE_Def,
186 NULL, //&vmmR3Switcher32BitToAMD64_Def,
187 &vmmR3SwitcherPAETo32Bit_Def,
188 &vmmR3SwitcherPAEToPAE_Def,
189 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
190# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
191 &vmmR3SwitcherAMD64ToPAE_Def,
192# else
193 NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
194# endif
195 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
196#else
197 NULL, //&vmmR3Switcher32BitTo32Bit_Def,
198 NULL, //&vmmR3Switcher32BitToPAE_Def,
199 NULL, //&vmmR3Switcher32BitToAMD64_Def,
200 NULL, //&vmmR3SwitcherPAETo32Bit_Def,
201 NULL, //&vmmR3SwitcherPAEToPAE_Def,
202 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
203 &vmmR3SwitcherAMD64ToPAE_Def,
204 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
205#endif
206};
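/*
 * The invariant stated above can be read directly off vmmR3InitCoreCode()
 * below: the array is indexed by the VMMSWITCHER enum value and each entry's
 * enmType must equal its own index, i.e.:
 *
 *      PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
 *      if (pSwitcher)
 *          AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
 */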
207
208
209
210/**
211 * Initializes the core code.
212 *
213 * This is per-VM core code which might need fixups and/or, for ease of
214 * use, is placed on linearly contiguous backing memory.
215 *
216 * @returns VBox status code.
217 * @param pVM Pointer to VM structure.
218 */
219static int vmmR3InitCoreCode(PVM pVM)
220{
221 /*
222 * Calc the size.
223 */
224 unsigned cbCoreCode = 0;
225 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
226 {
227 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
228 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
229 if (pSwitcher)
230 {
231 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
232 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
233 }
234 }
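/*
 * Worked example of the size calculation (sizes invented for illustration):
 * a switcher with cbCode = 0x123 contributes RT_ALIGN_32(0x123 + 1, 32)
 * = 0x140 bytes, so every switcher starts on a 32-byte boundary, and
 * aoffSwitchers[] records each starting offset within the core code block.
 */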
235
236 /*
237 * Allocate contiguous pages for switchers and deal with
238 * conflicts in the intermediate mapping of the code.
239 */
240 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
241 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
242 int rc = VERR_NO_MEMORY;
243 if (pVM->vmm.s.pvHCCoreCodeR3)
244 {
245 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
246 if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
247 {
248 /* try more allocations. */
249 struct
250 {
251 RTR0PTR pvR0;
252 void *pvR3;
253 RTHCPHYS HCPhys;
254 RTUINT cb;
255 } aBadTries[16];
256 unsigned i = 0;
257 do
258 {
259 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
260 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
261 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
262 i++;
263 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
264 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
265 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
266 if (!pVM->vmm.s.pvHCCoreCodeR3)
267 break;
268 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
269 } while ( rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
270 && i < ELEMENTS(aBadTries) - 1);
271
272 /* cleanup */
273 if (VBOX_FAILURE(rc))
274 {
275 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
276 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
277 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
278 aBadTries[i].cb = pVM->vmm.s.cbCoreCode;
279 i++;
280 LogRel(("Failed to allocated and map core code: rc=%Vrc\n", rc));
281 }
282 while (i-- > 0)
283 {
284 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
285 i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
286 SUPContFree(aBadTries[i].pvR3, aBadTries[i].cb >> PAGE_SHIFT);
287 }
288 }
289 }
290 if (VBOX_SUCCESS(rc))
291 {
292 /*
293 * copy the code.
294 */
295 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
296 {
297 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
298 if (pSwitcher)
299 memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
300 pSwitcher->pvCode, pSwitcher->cbCode);
301 }
302
303 /*
304 * Map the code into the GC address space.
305 */
306 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
307 if (VBOX_SUCCESS(rc))
308 {
309 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
310 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VGv Phys=%VHp cb=%#x\n",
311 pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
312
313 /*
314 * Finally, PGM has probably selected a switcher already, but we
315 * need to get the addresses, so we'll reselect it.
316 * This may legally fail, so we're ignoring the rc.
317 */
318 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
319 return rc;
320 }
321
322 /* shit */
323 AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
324 SUPContFree(pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
325 }
326 else
327 VMSetError(pVM, rc, RT_SRC_POS,
328 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code."),
329 cbCoreCode);
330
331 pVM->vmm.s.pvHCCoreCodeR3 = NULL;
332 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
333 pVM->vmm.s.pvGCCoreCode = 0;
334 return rc;
335}
336
337
338/**
339 * Initializes the VMM.
340 *
341 * @returns VBox status code.
342 * @param pVM The VM to operate on.
343 */
344VMMR3DECL(int) VMMR3Init(PVM pVM)
345{
346 LogFlow(("VMMR3Init\n"));
347
348 /*
349 * Assert alignment, sizes and order.
350 */
351 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
352 AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
353 ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
354 sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
355
356 /*
357 * Init basic VM VMM members.
358 */
359 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
360 int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
361 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
362 pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
363 //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
364 else
365 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
366
367 /* GC switchers are enabled by default. Turned off by HWACCM. */
368 pVM->vmm.s.fSwitcherDisabled = false;
369
370 /*
371 * Register the saved state data unit.
372 */
373 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
374 NULL, vmmR3Save, NULL,
375 NULL, vmmR3Load, NULL);
376 if (VBOX_FAILURE(rc))
377 return rc;
378
379#ifdef VBOX_WITHOUT_IDT_PATCHING
380 /*
381 * Register the Ring-0 VM handle with the session for fast ioctl calls.
382 */
383 rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
384 if (VBOX_FAILURE(rc))
385 return rc;
386#endif
387
388 /*
389 * Init core code.
390 */
391 rc = vmmR3InitCoreCode(pVM);
392 if (VBOX_SUCCESS(rc))
393 {
394 /*
395 * Allocate & init VMM GC stack.
396 * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
397 * (The page protection is modified during R3 init completion.)
398 */
399#ifdef VBOX_STRICT_VMM_STACK
400 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
401#else
402 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
403#endif
404 if (VBOX_SUCCESS(rc))
405 {
406 /* Set HC and GC stack pointers to top of stack. */
407 pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
408 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
409 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
410 AssertRelease(pVM->vmm.s.pbGCStack);
411
412 /* Set hypervisor ESP. */
413 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);
414
415 /*
416 * Allocate GC & R0 Logger instances (they are finalized in the relocator).
417 */
418#ifdef LOG_ENABLED
419 PRTLOGGER pLogger = RTLogDefaultInstance();
420 if (pLogger)
421 {
422 pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]);
423 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
424 if (VBOX_SUCCESS(rc))
425 {
426 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
427
428/*
429 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
430 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
431 *
432 * If you want to log in non-debug modes, you'll have to remember to change SUPDRvShared.c
433 * to not stub all the log functions.
434 */
435# ifdef DEBUG_sandervl
436 rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
437 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
438 if (VBOX_SUCCESS(rc))
439 {
440 pVM->vmm.s.pR0Logger->pVM = pVM;
441 //pVM->vmm.s.pR0Logger->fCreated = false;
442 pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
443 }
444# endif
445 }
446 }
447#endif /* LOG_ENABLED */
448
449#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
450 /*
451 * Allocate GC Release Logger instances (finalized in the relocator).
452 */
453 if (VBOX_SUCCESS(rc))
454 {
455 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
456 if (pRelLogger)
457 {
458 pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]);
459 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
460 if (VBOX_SUCCESS(rc))
461 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
462 }
463 }
464#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
465
466#ifdef VBOX_WITH_NMI
467 /*
468 * Allocate mapping for the host APIC.
469 */
470 if (VBOX_SUCCESS(rc))
471 {
472 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
473 AssertRC(rc);
474 }
475#endif
476 if (VBOX_SUCCESS(rc))
477 {
478 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
479 if (VBOX_SUCCESS(rc))
480 {
481 /*
482 * Statistics.
483 */
484 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
485 STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
486 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
487 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
488 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
489 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
490 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
491 STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
492 STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
493 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
494 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
495 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
496 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
497 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
498 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
499 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
500 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
508 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
509 STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
510 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
511 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
512 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
513 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
514 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
515 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
516 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
517 STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
518 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
519 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
520 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
521 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
522 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
523 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
524 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
525 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
526 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
527 STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
528 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
529 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
530 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
531 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
532 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
533
534 return VINF_SUCCESS;
535 }
536 AssertRC(rc);
537 }
538 }
539 /** @todo: Need failure cleanup. */
540
541 //more todo in here?
542 //if (VBOX_SUCCESS(rc))
543 //{
544 //}
545 //int rc2 = vmmR3TermCoreCode(pVM);
546 //AssertRC(rc2));
547 }
548
549 return rc;
550}
551
552
553/**
554 * Ring-3 init finalizing.
555 *
556 * @returns VBox status code.
557 * @param pVM The VM handle.
558 */
559VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
560{
561#ifdef VBOX_STRICT_VMM_STACK
562 /*
563 * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
564 */
565 memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
566 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
567 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
568
569 memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
570 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
571 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
572#endif
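/*
 * Resulting strict-stack layout (one guard page on each side):
 *
 *      pbHCStack - PAGE_SIZE        guard page, 0xcc filled, no access
 *      pbHCStack                    VMM_STACK_SIZE bytes of stack
 *      pbHCStack + VMM_STACK_SIZE   guard page, 0xcc filled, no access
 */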
573
574 /*
575 * Set page attributes to r/w for stack pages.
576 */
577 int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
578 AssertRC(rc);
579 if (VBOX_SUCCESS(rc))
580 {
581 /*
582 * Create the EMT yield timer.
583 */
584 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
585 if (VBOX_SUCCESS(rc))
586 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
587 }
588#ifdef VBOX_WITH_NMI
589 /*
590 * Map the host APIC into GC - this may be host OS specific!
591 */
592 if (VBOX_SUCCESS(rc))
593 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
594 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
595#endif
596 return rc;
597}
598
599
600/**
601 * Initializes the R0 VMM.
602 *
603 * @returns VBox status code.
604 * @param pVM The VM to operate on.
605 */
606VMMR3DECL(int) VMMR3InitR0(PVM pVM)
607{
608 int rc;
609
610 /*
611 * Initialize the ring-0 logger if we haven't done so yet.
612 */
613 if ( pVM->vmm.s.pR0Logger
614 && !pVM->vmm.s.pR0Logger->fCreated)
615 {
616 rc = VMMR3UpdateLoggers(pVM);
617 if (VBOX_FAILURE(rc))
618 return rc;
619 }
620
621 /*
622 * Call Ring-0 entry with init code.
623 */
624 for (;;)
625 {
626#ifdef NO_SUPCALLR0VMM
627 //rc = VERR_GENERAL_FAILURE;
628 rc = VINF_SUCCESS;
629#else
630 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION);
631#endif
632 if ( pVM->vmm.s.pR0Logger
633 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
634 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
635 if (rc != VINF_VMM_CALL_HOST)
636 break;
637 rc = vmmR3ServiceCallHostRequest(pVM);
638 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
639 break;
640 break; // remove this when we do setjmp for all ring-0 stuff.
641 }
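/*
 * Note the loop pattern above: ring-0 code cannot call ring-3 directly, so
 * it returns VINF_VMM_CALL_HOST and the EMT services the request through
 * vmmR3ServiceCallHostRequest() before reentering ring-0. VMMR3InitGC()
 * below uses the same shape for guest context calls.
 */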
642
643 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
644 {
645 LogRel(("R0 init failed, rc=%Vra\n", rc));
646 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
647 rc = VERR_INTERNAL_ERROR;
648 }
649 return rc;
650}
651
652
653/**
654 * Initializes the GC VMM.
655 *
656 * @returns VBox status code.
657 * @param pVM The VM to operate on.
658 */
659VMMR3DECL(int) VMMR3InitGC(PVM pVM)
660{
661 /* When the switcher is disabled (hardware virtualization), there's no need to init GC. */
662 if (pVM->vmm.s.fSwitcherDisabled)
663 return VINF_SUCCESS;
664
665 /*
666 * Call VMMGCInit():
667 * -# resolve the address.
668 * -# setup stackframe and EIP to use the trampoline.
669 * -# do a generic hypervisor call.
670 */
671 RTGCPTR GCPtrEP;
672 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
673 if (VBOX_SUCCESS(rc))
674 {
675 CPUMHyperSetCtxCore(pVM, NULL);
676 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
677 uint64_t u64TS = RTTimeProgramStartNanoTS();
678#if GC_ARCH_BITS == 32
679 CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
680 CPUMPushHyper(pVM, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
681#else /* 64-bit GC */
682 CPUMPushHyper(pVM, u64TS); /* Param 3: The program startup TS. */
683#endif
684 CPUMPushHyper(pVM, VBOX_VERSION); /* Param 2: Version argument. */
685 CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
686 CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */
687 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */
688 CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */
689 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
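/*
 * The pushes above leave the following frame on the hypervisor stack for
 * the call trampoline (top of stack first):
 *
 *      GCPtrEP                    call EIP (VMMGCEntry)
 *      3 * sizeof(RTGCPTR)        trampoline param: argument frame size
 *      pVM->pVMGC                 param 0: the VM
 *      VMMGC_DO_VMMGC_INIT        param 1: operation
 *      VBOX_VERSION               param 2: version
 *      u64TS                      param 3: program startup TS (two pushes on 32-bit GC)
 */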
690
691 for (;;)
692 {
693#ifdef NO_SUPCALLR0VMM
694 //rc = VERR_GENERAL_FAILURE;
695 rc = VINF_SUCCESS;
696#else
697 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
698#endif
699#ifdef LOG_ENABLED
700 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
701 if ( pLogger
702 && pLogger->offScratch > 0)
703 RTLogFlushGC(NULL, pLogger);
704#endif
705#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
706 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
707 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
708 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
709#endif
710 if (rc != VINF_VMM_CALL_HOST)
711 break;
712 rc = vmmR3ServiceCallHostRequest(pVM);
713 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
714 break;
715 }
716
717 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
718 {
719 VMMR3FatalDump(pVM, rc);
720 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
721 rc = VERR_INTERNAL_ERROR;
722 }
723 AssertRC(rc);
724 }
725 return rc;
726}
727
728
729/**
730 * Terminate the VMM bits.
731 *
732 * @returns VINF_SUCCESS.
733 * @param pVM The VM handle.
734 */
735VMMR3DECL(int) VMMR3Term(PVM pVM)
736{
737 /** @todo must call ring-0 so the logger thread instance can be properly removed. */
738
739#ifdef VBOX_STRICT_VMM_STACK
740 /*
741 * Make the two stack guard pages present again.
742 */
743 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
744 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
745#endif
746 return VINF_SUCCESS;
747}
748
749
750/**
751 * Applies relocations to data and code managed by this
752 * component. This function will be called at init and
753 * whenever the VMM needs to relocate itself inside the GC.
754 *
755 * The VMM will need to apply relocations to the core code.
756 *
757 * @param pVM The VM handle.
758 * @param offDelta The relocation delta.
759 */
760VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
761{
762 LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
763
764 /*
765 * Recalc the GC address.
766 */
767 pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);
768
769 /*
770 * The stack.
771 */
772 CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
773 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
774 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
775
776 /*
777 * All the switchers.
778 */
779 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
780 {
781 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
782 if (pSwitcher && pSwitcher->pfnRelocate)
783 {
784 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
785 pSwitcher->pfnRelocate(pVM,
786 pSwitcher,
787 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
788 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
789 pVM->vmm.s.pvGCCoreCode + off,
790 pVM->vmm.s.HCPhysCoreCode + off);
791 }
792 }
793
794 /*
795 * Recalc the GC address for the current switcher.
796 */
797 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
798 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
799 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
800 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
801 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
802 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
803 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
804
805 /*
806 * Get other GC entry points.
807 */
808 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
809 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
810
811 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
812 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
813
814 /*
815 * Update the logger.
816 */
817 VMMR3UpdateLoggers(pVM);
818}
819
820
821/**
822 * Updates the settings for the GC and R0 loggers.
823 *
824 * @returns VBox status code.
825 * @param pVM The VM handle.
826 */
827VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
828{
829 /*
830 * Simply clone the logger instance (for GC).
831 */
832 int rc = VINF_SUCCESS;
833 RTGCPTR GCPtrLoggerFlush = 0;
834
835 if (pVM->vmm.s.pLoggerHC
836#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
837 || pVM->vmm.s.pRelLoggerHC
838#endif
839 )
840 {
841 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
842 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
843 }
844
845 if (pVM->vmm.s.pLoggerHC)
846 {
847 RTGCPTR GCPtrLoggerWrapper = 0;
848 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
849 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
850 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
851 rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
852 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
853 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
854 }
855
856#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
857 if (pVM->vmm.s.pRelLoggerHC)
858 {
859 RTGCPTR GCPtrLoggerWrapper = 0;
860 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
861 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
862 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
863 rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
864 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
865 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
866 }
867#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
868
869 /*
870 * For the ring-0 EMT logger, we use a per-thread logger
871 * instance in ring-0. Only initialize it once.
872 */
873 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
874 if (pR0Logger)
875 {
876 if (!pR0Logger->fCreated)
877 {
878 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
879 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
880 AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Vra\n", rc), rc);
881
882 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
883 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
884 AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc);
885
886 rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
887 *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
888 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
889 AssertReleaseMsgRCReturn(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc), rc);
890 pR0Logger->fCreated = true;
891 }
892
893 rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
894 AssertRC(rc);
895 }
896
897 return rc;
898}
899
900
901/**
902 * Generic switch code relocator.
903 *
904 * @param pVM The VM handle.
905 * @param pSwitcher The switcher definition.
906 * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping.
907 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
908 * @param GCPtrCode The guest context address corresponding to pu8Code.
909 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
910 * @param SelCS The hypervisor CS selector.
911 * @param SelDS The hypervisor DS selector.
912 * @param SelTSS The hypervisor TSS selector.
913 * @param GCPtrGDT The GC address of the hypervisor GDT.
914 * @param SelCS64 The 64-bit mode hypervisor CS selector.
915 */
916static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
917 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
918{
919 union
920 {
921 const uint8_t *pu8;
922 const uint16_t *pu16;
923 const uint32_t *pu32;
924 const uint64_t *pu64;
925 const void *pv;
926 uintptr_t u;
927 } u;
928 u.pv = pSwitcher->pvFixups;
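/*
 * The fixup records consumed below form a simple byte stream (a sketch of
 * the layout as this loop reads it; the per-type payloads are in the FIX_*
 * cases):
 *
 *      repeat until u8Type == FIX_THE_END:
 *          uint8_t  u8Type;      // FIX_* fixup type
 *          uint32_t offSrc;      // offset of the fixup site in the code
 *          [uint32_t offTrg;]    // most types: target offset in the code
 */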
929
930 /*
931 * Process fixups.
932 */
933 uint8_t u8;
934 while ((u8 = *u.pu8++) != FIX_THE_END)
935 {
936 /*
937 * Get the source (where to write the fixup).
938 */
939 uint32_t offSrc = *u.pu32++;
940 Assert(offSrc < pSwitcher->cbCode);
941 union
942 {
943 uint8_t *pu8;
944 uint16_t *pu16;
945 uint32_t *pu32;
946 uint64_t *pu64;
947 uintptr_t u;
948 } uSrc;
949 uSrc.pu8 = pu8CodeR3 + offSrc;
950
951 /* The fixup target and method depends on the type. */
952 switch (u8)
953 {
954 /*
955 * 32-bit relative, source in HC and target in GC.
956 */
957 case FIX_HC_2_GC_NEAR_REL:
958 {
959 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
960 uint32_t offTrg = *u.pu32++;
961 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
962 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
963 break;
964 }
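/*
 * Near-relative fixups like the one above all follow the x86 rel32 rule:
 * the 32-bit field holds (target address) - (address of the byte after the
 * field), hence the "+ 4" in each of these cases; only the source and
 * target address spaces differ between the *_NEAR_REL types.
 */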
965
966 /*
967 * 32-bit relative, source in HC and target in ID.
968 */
969 case FIX_HC_2_ID_NEAR_REL:
970 {
971 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
972 uint32_t offTrg = *u.pu32++;
973 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
974 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (uSrc.u + 4));
975 break;
976 }
977
978 /*
979 * 32-bit relative, source in GC and target in HC.
980 */
981 case FIX_GC_2_HC_NEAR_REL:
982 {
983 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
984 uint32_t offTrg = *u.pu32++;
985 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
986 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
987 break;
988 }
989
990 /*
991 * 32-bit relative, source in GC and target in ID.
992 */
993 case FIX_GC_2_ID_NEAR_REL:
994 {
995 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
996 uint32_t offTrg = *u.pu32++;
997 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
998 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
999 break;
1000 }
1001
1002 /*
1003 * 32-bit relative, source in ID and target in HC.
1004 */
1005 case FIX_ID_2_HC_NEAR_REL:
1006 {
1007 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1008 uint32_t offTrg = *u.pu32++;
1009 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1010 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
1011 break;
1012 }
1013
1014 /*
1015 * 32-bit relative, source in ID and target in GC.
1016 */
1017 case FIX_ID_2_GC_NEAR_REL:
1018 {
1019 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1020 uint32_t offTrg = *u.pu32++;
1021 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1022 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
1023 break;
1024 }
1025
1026 /*
1027 * 16:32 far jump, target in GC.
1028 */
1029 case FIX_GC_FAR32:
1030 {
1031 uint32_t offTrg = *u.pu32++;
1032 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1033 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
1034 *uSrc.pu16++ = SelCS;
1035 break;
1036 }
1037
1038 /*
1039 * Make 32-bit GC pointer given CPUM offset.
1040 */
1041 case FIX_GC_CPUM_OFF:
1042 {
1043 uint32_t offCPUM = *u.pu32++;
1044 Assert(offCPUM < sizeof(pVM->cpum));
1045 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
1046 break;
1047 }
1048
1049 /*
1050 * Make 32-bit GC pointer given VM offset.
1051 */
1052 case FIX_GC_VM_OFF:
1053 {
1054 uint32_t offVM = *u.pu32++;
1055 Assert(offVM < sizeof(VM));
1056 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
1057 break;
1058 }
1059
1060 /*
1061 * Make 32-bit HC pointer given CPUM offset.
1062 */
1063 case FIX_HC_CPUM_OFF:
1064 {
1065 uint32_t offCPUM = *u.pu32++;
1066 Assert(offCPUM < sizeof(pVM->cpum));
1067 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
1068 break;
1069 }
1070
1071 /*
1072 * Make 32-bit R0 pointer given VM offset.
1073 */
1074 case FIX_HC_VM_OFF:
1075 {
1076 uint32_t offVM = *u.pu32++;
1077 Assert(offVM < sizeof(VM));
1078 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
1079 break;
1080 }
1081
1082 /*
1083 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
1084 */
1085 case FIX_INTER_32BIT_CR3:
1086 {
1087
1088 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
1089 break;
1090 }
1091
1092 /*
1093 * Store the PAE CR3 (32-bit) for the intermediate memory context.
1094 */
1095 case FIX_INTER_PAE_CR3:
1096 {
1097
1098 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
1099 break;
1100 }
1101
1102 /*
1103 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
1104 */
1105 case FIX_INTER_AMD64_CR3:
1106 {
1107
1108 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
1109 break;
1110 }
1111
1112 /*
1113 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
1114 */
1115 case FIX_HYPER_32BIT_CR3:
1116 {
1117
1118 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
1119 break;
1120 }
1121
1122 /*
1123 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
1124 */
1125 case FIX_HYPER_PAE_CR3:
1126 {
1127
1128 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
1129 break;
1130 }
1131
1132 /*
1133 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
1134 */
1135 case FIX_HYPER_AMD64_CR3:
1136 {
1137
1138 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
1139 break;
1140 }
1141
1142 /*
1143 * Store Hypervisor CS (16-bit).
1144 */
1145 case FIX_HYPER_CS:
1146 {
1147 *uSrc.pu16 = SelCS;
1148 break;
1149 }
1150
1151 /*
1152 * Store Hypervisor DS (16-bit).
1153 */
1154 case FIX_HYPER_DS:
1155 {
1156 *uSrc.pu16 = SelDS;
1157 break;
1158 }
1159
1160 /*
1161 * Store Hypervisor TSS (16-bit).
1162 */
1163 case FIX_HYPER_TSS:
1164 {
1165 *uSrc.pu16 = SelTSS;
1166 break;
1167 }
1168
1169 /*
1170 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
1171 */
1172 case FIX_GC_TSS_GDTE_DW2:
1173 {
1174 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
1175 *uSrc.pu32 = (uint32_t)GCPtr;
1176 break;
1177 }
1178
1179
1180 ///@todo case FIX_CR4_MASK:
1181 ///@todo case FIX_CR4_OSFSXR:
1182
1183 /*
1184 * Insert relative jump to the specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
1185 */
1186 case FIX_NO_FXSAVE_JMP:
1187 {
1188 uint32_t offTrg = *u.pu32++;
1189 Assert(offTrg < pSwitcher->cbCode);
1190 if (!CPUMSupportsFXSR(pVM))
1191 {
1192 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1193 *uSrc.pu32++ = offTrg - (offSrc + 5);
1194 }
1195 else
1196 {
1197 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1198 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1199 }
1200 break;
1201 }
1202
1203 /*
1204 * Insert relative jump to the specified target if SYSENTER isn't used by the host.
1205 */
1206 case FIX_NO_SYSENTER_JMP:
1207 {
1208 uint32_t offTrg = *u.pu32++;
1209 Assert(offTrg < pSwitcher->cbCode);
1210 if (!CPUMIsHostUsingSysEnter(pVM))
1211 {
1212 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1213 *uSrc.pu32++ = offTrg - (offSrc + 5);
1214 }
1215 else
1216 {
1217 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1218 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1219 }
1220 break;
1221 }
1222
1223 /*
1224 * Insert relative jump to the specified target if SYSCALL isn't used by the host.
1225 */
1226 case FIX_NO_SYSCALL_JMP:
1227 {
1228 uint32_t offTrg = *u.pu32++;
1229 Assert(offTrg < pSwitcher->cbCode);
1230 if (!CPUMIsHostUsingSysCall(pVM))
1231 {
1232 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1233 *uSrc.pu32++ = offTrg - (offSrc + 5);
1234 }
1235 else
1236 {
1237 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1238 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1239 }
1240 break;
1241 }
1242
1243 /*
1244 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1245 */
1246 case FIX_HC_32BIT:
1247 {
1248 uint32_t offTrg = *u.pu32++;
1249 Assert(offSrc < pSwitcher->cbCode);
1250 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1251 *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
1252 break;
1253 }
1254
1255#if defined(__AMD64__) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1256 /*
1257 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1258 */
1259 case FIX_HC_64BIT:
1260 {
1261 uint32_t offTrg = *u.pu32++;
1262 Assert(offSrc < pSwitcher->cbCode);
1263 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1264 *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
1265 break;
1266 }
1267
1268 /*
1269 * 64-bit HC Code Selector (no argument).
1270 */
1271 case FIX_HC_64BIT_CS:
1272 {
1273 Assert(offSrc < pSwitcher->cbCode);
1274#if defined(__DARWIN__) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1275 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
1276#else
1277 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
1278#endif
1279 break;
1280 }
1281
1282 /*
1283 * 64-bit HC pointer to the CPUM instance data (no argument).
1284 */
1285 case FIX_HC_64BIT_CPUM:
1286 {
1287 Assert(offSrc < pSwitcher->cbCode);
1288 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
1289 break;
1290 }
1291#endif
1292
1293 /*
1294 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
1295 */
1296 case FIX_ID_32BIT:
1297 {
1298 uint32_t offTrg = *u.pu32++;
1299 Assert(offSrc < pSwitcher->cbCode);
1300 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1301 *uSrc.pu32 = u32IDCode + offTrg;
1302 break;
1303 }
1304
1305 /*
1306 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
1307 */
1308 case FIX_ID_64BIT:
1309 {
1310 uint32_t offTrg = *u.pu32++;
1311 Assert(offSrc < pSwitcher->cbCode);
1312 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1313 *uSrc.pu64 = u32IDCode + offTrg;
1314 break;
1315 }
1316
1317 /*
1318 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
1319 */
1320 case FIX_ID_FAR32_TO_64BIT_MODE:
1321 {
1322 uint32_t offTrg = *u.pu32++;
1323 Assert(offSrc < pSwitcher->cbCode);
1324 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1325 *uSrc.pu32++ = u32IDCode + offTrg;
1326 *uSrc.pu16 = SelCS64;
1327 AssertRelease(SelCS64);
1328 break;
1329 }
1330
1331#ifdef VBOX_WITH_NMI
1332 /*
1333 * 32-bit address to the APIC base.
1334 */
1335 case FIX_GC_APIC_BASE_32BIT:
1336 {
1337 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
1338 break;
1339 }
1340#endif
1341
1342 default:
1343 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
1344 break;
1345 }
1346 }
1347
1348#ifdef LOG_ENABLED
1349 /*
1350 * If Log2 is enabled, disassemble the switcher code.
1351 *
1352 * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
1353 */
1354 if (LogIs2Enabled())
1355 {
1356 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
1357 " pu8CodeR0 = %p\n"
1358 " pu8CodeR3 = %p\n"
1359 " GCPtrCode = %VGv\n"
1360 " u32IDCode = %08x\n"
1361 " pVMGC = %VGv\n"
1362 " pCPUMGC = %VGv\n"
1363 " pVMHC = %p\n"
1364 " pCPUMHC = %p\n"
1365 " GCPtrGDT = %VGv\n"
1366 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1367 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1368 " SelCS = %04x\n"
1369 " SelDS = %04x\n"
1370 " SelCS64 = %04x\n"
1371 " SelTSS = %04x\n",
1372 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
1373 pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
1374 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
1375 GCPtrGDT,
1376 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
1377 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
1378 SelCS, SelDS, SelCS64, SelTSS);
1379
1380 uint32_t offCode = 0;
1381 while (offCode < pSwitcher->cbCode)
1382 {
1383 /*
1384 * Figure out where this is.
1385 */
1386 const char *pszDesc = NULL;
1387 RTUINTPTR uBase;
1388 uint32_t cbCode;
1389 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
1390 {
1391 pszDesc = "HCCode0";
1392 uBase = (RTUINTPTR)pu8CodeR0;
1393 offCode = pSwitcher->offHCCode0;
1394 cbCode = pSwitcher->cbHCCode0;
1395 }
1396 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
1397 {
1398 pszDesc = "HCCode1";
1399 uBase = (RTUINTPTR)pu8CodeR0;
1400 offCode = pSwitcher->offHCCode1;
1401 cbCode = pSwitcher->cbHCCode1;
1402 }
1403 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
1404 {
1405 pszDesc = "GCCode";
1406 uBase = GCPtrCode;
1407 offCode = pSwitcher->offGCCode;
1408 cbCode = pSwitcher->cbGCCode;
1409 }
1410 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
1411 {
1412 pszDesc = "IDCode0";
1413 uBase = u32IDCode;
1414 offCode = pSwitcher->offIDCode0;
1415 cbCode = pSwitcher->cbIDCode0;
1416 }
1417 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
1418 {
1419 pszDesc = "IDCode1";
1420 uBase = u32IDCode;
1421 offCode = pSwitcher->offIDCode1;
1422 cbCode = pSwitcher->cbIDCode1;
1423 }
1424 else
1425 {
1426 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
1427 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1428 offCode++;
1429 continue;
1430 }
1431
1432 /*
1433 * Disassemble it.
1434 */
1435 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
1436 DISCPUSTATE Cpu = {0};
1437 Cpu.mode = CPUMODE_32BIT;
1438 while (cbCode > 0)
1439 {
1440 /* try label it */
1441 if (pSwitcher->offR0HostToGuest == offCode)
1442 RTLogPrintf(" *R0HostToGuest:\n");
1443 if (pSwitcher->offGCGuestToHost == offCode)
1444 RTLogPrintf(" *GCGuestToHost:\n");
1445 if (pSwitcher->offGCCallTrampoline == offCode)
1446 RTLogPrintf(" *GCCallTrampoline:\n");
1447 if (pSwitcher->offGCGuestToHostAsm == offCode)
1448 RTLogPrintf(" *GCGuestToHostAsm:\n");
1449 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
1450 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
1451 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
1452 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
1453
1454 /* disas */
1455 uint32_t cbInstr = 0;
1456 char szDisas[256];
1457 if (DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas))
1458 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
1459 else
1460 {
1461 RTLogPrintf(" %04x: %02x '%c'\n",
1462 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1463 cbInstr = 1;
1464 }
1465 offCode += cbInstr;
1466 cbCode -= RT_MIN(cbInstr, cbCode);
1467 }
1468 }
1469 }
1470#endif
1471}
1472
1473
1474/**
1475 * Relocator for the 32-Bit to 32-Bit world switcher.
1476 */
1477DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1478{
1479 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1480 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1481}
1482
1483
1484/**
1485 * Relocator for the 32-Bit to PAE world switcher.
1486 */
1487DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1488{
1489 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1490 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1491}
1492
1493
1494/**
1495 * Relocator for the PAE to 32-Bit world switcher.
1496 */
1497DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1498{
1499 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1500 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1501}
1502
1503
1504/**
1505 * Relocator for the PAE to PAE world switcher.
1506 */
1507DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1508{
1509 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1510 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1511}
1512
1513
1514/**
1515 * Relocator for the AMD64 to PAE world switcher.
1516 */
1517DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1518{
1519 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1520 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
1521}
1522
1523
1524/**
1525 * Gets the pointer to g_szRTAssertMsg1 in GC.
1526 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
1527 * Returns NULL if not present.
1528 * @param pVM The VM handle.
1529 */
1530VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
1531{
1532 RTGCPTR GCPtr;
1533 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
1534 if (VBOX_SUCCESS(rc))
1535 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1536 return NULL;
1537}
1538
1539
1540/**
1541 * Gets the pointer to g_szRTAssertMsg2 in GC.
1542 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
1543 * Returns NULL if not present.
1544 * @param pVM The VM handle.
1545 */
1546VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
1547{
1548 RTGCPTR GCPtr;
1549 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
1550 if (VBOX_SUCCESS(rc))
1551 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1552 return NULL;
1553}
1554
1555
1556/**
1557 * Execute state save operation.
1558 *
1559 * @returns VBox status code.
1560 * @param pVM VM Handle.
1561 * @param pSSM SSM operation handle.
1562 */
1563static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1564{
1565 LogFlow(("vmmR3Save:\n"));
1566
1567 /*
1568 * The hypervisor stack.
1569 */
1570 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1571 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1572 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1573 SSMR3PutGCPtr(pSSM, GCPtrESP);
1574 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1575 return SSMR3PutU32(pSSM, ~0); /* terminator */
1576}
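/*
 * Saved state layout of the "vmm" unit as written above (and checked by
 * vmmR3Load below):
 *
 *      RTGCPTR   pbGCStackBottom            hypervisor stack bottom
 *      RTGCPTR   GCPtrESP                   current hypervisor ESP
 *      uint8_t   [VMM_STACK_SIZE]           raw stack contents
 *      uint32_t  0xffffffff                 terminator
 */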
1577
1578
1579/**
1580 * Execute state load operation.
1581 *
1582 * @returns VBox status code.
1583 * @param pVM VM Handle.
1584 * @param pSSM SSM operation handle.
1585 * @param u32Version Data layout version.
1586 */
1587static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1588{
1589 LogFlow(("vmmR3Load:\n"));
1590
1591 /*
1592 * Validate version.
1593 */
1594 if (u32Version != VMM_SAVED_STATE_VERSION)
1595 {
1596 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1597 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1598 }
1599
1600 /*
1601 * Check that the stack is in the same place, or that it's fairly empty.
1602 */
1603 RTGCPTR GCPtrStackBottom;
1604 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1605 RTGCPTR GCPtrESP;
1606 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1607 if (VBOX_FAILURE(rc))
1608 return rc;
1609 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1610 || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1611 {
1612 /*
1613 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1614 * the ESP in CPUM fatally invalid.
1615 */
1616 CPUMSetHyperESP(pVM, GCPtrESP);
1617
1618 /* restore the stack. */
1619 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1620
1621 /* terminator */
1622 uint32_t u32;
1623 rc = SSMR3GetU32(pSSM, &u32);
1624 if (VBOX_FAILURE(rc))
1625 return rc;
1626 if (u32 != ~0U)
1627 {
1628 AssertMsgFailed(("u32=%#x\n", u32));
1629 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1630 }
1631 return VINF_SUCCESS;
1632 }
1633
1634 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1635 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1636 AssertFailed();
1637 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1638}
1639
1640
1641/**
1642 * Selects the switcher to be used for switching to GC.
1643 *
1644 * @returns VBox status code.
1645 * @param pVM VM handle.
1646 * @param enmSwitcher The new switcher.
1647 * @remark This function may be called before the VMM is initialized.
1648 */
1649VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1650{
1651 /*
1652 * Validate input.
1653 */
1654 if ( enmSwitcher < VMMSWITCHER_INVALID
1655 || enmSwitcher >= VMMSWITCHER_MAX)
1656 {
1657 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1658 return VERR_INVALID_PARAMETER;
1659 }
1660
1661 /*
1662 * Select the new switcher.
1663 */
1664 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1665 if (pSwitcher)
1666 {
1667 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1668 pVM->vmm.s.enmSwitcher = enmSwitcher;
1669
1670 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1671 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1672
1673 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1674 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1675 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1676 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1677 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1678 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1679 return VINF_SUCCESS;
1680 }
1681 return VERR_NOT_IMPLEMENTED;
1682}
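
/* Usage sketch, assuming a VMMSWITCHER_PAE_TO_PAE enum value matching the
 * relocators above (the exact enum names are assumptions here):
 * @code
 *  int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 *  AssertRCReturn(rc, rc);
 * @endcode
 */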
1683
1684/**
1685 * Disable the switcher logic permanently.
1686 *
1687 * @returns VBox status code.
1688 * @param pVM VM handle.
1689 */
1690VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1691{
1692/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1693 * @code
1694 * mov eax, VERR_INTERNAL_ERROR
1695 * ret
1696 * @endcode
1697 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1698 */
1699 pVM->vmm.s.fSwitcherDisabled = true;
1700 return VINF_SUCCESS;
1701}
1702
1703
1704/**
1705 * Resolve a builtin GC symbol.
1706 * Called by PDM when loading or relocating GC modules.
1707 *
1708 * @returns VBox status code.
1709 * @param pVM VM Handle.
1710 * @param pszSymbol Symbol to resolve.
1711 * @param pGCPtrValue Where to store the symbol value.
1712 * @remark This has to work before VMMR3Relocate() is called.
1713 */
1714VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1715{
1716 if (!strcmp(pszSymbol, "g_Logger"))
1717 {
1718 if (pVM->vmm.s.pLoggerHC)
1719 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1720 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1721 }
1722 else if (!strcmp(pszSymbol, "g_RelLogger"))
1723 {
1724#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1725 if (pVM->vmm.s.pRelLoggerHC)
1726 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1727 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1728#else
1729 *pGCPtrValue = NIL_RTGCPTR;
1730#endif
1731 }
1732 else
1733 return VERR_SYMBOL_NOT_FOUND;
1734 return VINF_SUCCESS;
1735}
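
/* Usage sketch: PDM calls this resolver for built-in symbols when loading or
 * relocating a GC module; an explicit caller would look like this:
 * @code
 *  RTGCPTR GCPtrLogger;
 *  int rc = VMMR3GetImportGC(pVM, "g_Logger", &GCPtrLogger);
 *  if (VBOX_SUCCESS(rc))
 *      Log(("g_Logger is mapped at %VGv in GC\n", GCPtrLogger));
 * @endcode
 */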
1736
1737
1738/**
1739 * Suspends the CPU yielder.
1740 *
1741 * @param pVM The VM handle.
1742 */
1743VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1744{
1745 if (!pVM->vmm.s.cYieldResumeMillies)
1746 {
1747 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1748 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1749 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1750 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1751 else
1752 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1753 TMTimerStop(pVM->vmm.s.pYieldTimer);
1754 }
1755 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1756}
1757
1758
1759/**
1760 * Stops the CPU yielder.
1761 *
1762 * @param pVM The VM handle.
1763 */
1764VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1765{
1766 if (!pVM->vmm.s.cYieldResumeMillies)
1767 TMTimerStop(pVM->vmm.s.pYieldTimer);
1768 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1769 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1770}
1771
1772
1773/**
1774 * Resumes the CPU yielder when it has been suspended or stopped.
1775 *
1776 * @param pVM The VM handle.
1777 */
1778VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1779{
1780 if (pVM->vmm.s.cYieldResumeMillies)
1781 {
1782 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1783 pVM->vmm.s.cYieldResumeMillies = 0;
1784 }
1785}
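
/* Usage sketch: the suspend/resume pair brackets work during which EMT should
 * not be yielding (DoTimingSensitiveWork is a hypothetical helper):
 * @code
 *  VMMR3YieldSuspend(pVM);
 *  int rc = DoTimingSensitiveWork(pVM);    // hypothetical
 *  VMMR3YieldResume(pVM);
 * @endcode
 */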
1786
1787
1788/**
1789 * Internal timer callback function.
1790 *
1791 * @param pVM The VM.
1792 * @param pTimer The timer handle.
1793 * @param pvUser User argument specified upon timer creation.
1794 */
1795static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1796{
1797 /*
1798 * This really needs some careful tuning. While we shouldn't be too greedy, since
1799 * that would stall the rest of the system, we shouldn't be too nice either,
1800 * because that would stall us.
1801 *
1802 * The current logic is to use the default interval when there is no lag worth
1803 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1804 *
1805 * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
1806 * so the lag is up to date.)
1807 */
1808 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1809 if ( u64Lag < 50000000 /* 50ms */
1810 || ( u64Lag < 1000000000 /* 1s */
1811 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1812 )
1813 {
1814 uint64_t u64Elapsed = RTTimeNanoTS();
1815 pVM->vmm.s.u64LastYield = u64Elapsed;
1816
1817 RTThreadYield();
1818
1819#ifdef LOG_ENABLED
1820 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1821 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1822#endif
1823 }
1824 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1825}
1826
1827
1828/**
1829 * Acquire global VM lock.
1830 *
1831 * @returns VBox status code
1832 * @param pVM The VM to operate on.
1833 */
1834VMMR3DECL(int) VMMR3Lock(PVM pVM)
1835{
1836 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1837}
1838
1839
1840/**
1841 * Release global VM lock.
1842 *
1843 * @returns VBox status code
1844 * @param pVM The VM to operate on.
1845 */
1846VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1847{
1848 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1849}
1850
1851
1852/**
1853 * Return global VM lock owner.
1854 *
1855 * @returns Thread id of owner.
1856 * @returns NIL_RTTHREAD if no owner.
1857 * @param pVM The VM to operate on.
1858 */
1859VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1860{
1861 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1862}
1863
1864
1865/**
1866 * Checks if the current thread is the owner of the global VM lock.
1867 *
1868 * @returns true if owner.
1869 * @returns false if not owner.
1870 * @param pVM The VM to operate on.
1871 */
1872VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1873{
1874 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1875}
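
/* Usage sketch for the global VM lock API above:
 * @code
 *  int rc = VMMR3Lock(pVM);
 *  AssertRCReturn(rc, rc);
 *  Assert(VMMR3LockIsOwner(pVM));
 *  // ... access state protected by the global VM lock ...
 *  VMMR3Unlock(pVM);
 * @endcode
 */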
1876
1877
1878/**
1879 * Executes guest code.
1880 *
1881 * @param pVM VM handle.
1882 */
1883VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1884{
1885 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1886
1887 /*
1888 * Set the EIP and ESP.
1889 */
1890 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1891 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1892 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1893 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1894
1895 /*
1896 * We hide log flushes (outer) and hypervisor interrupts (inner).
1897 */
1898 for (;;)
1899 {
1900 int rc;
1901 do
1902 {
1903#ifdef NO_SUPCALLR0VMM
1904 rc = VERR_GENERAL_FAILURE;
1905#else
1906 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1907#endif
1908 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1909
1910 /*
1911 * Flush the logs.
1912 */
1913#ifdef LOG_ENABLED
1914 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1915 if ( pLogger
1916 && pLogger->offScratch > 0)
1917 RTLogFlushGC(NULL, pLogger);
1918#endif
1919#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1920 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1921 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1922 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1923#endif
1924 if (rc != VINF_VMM_CALL_HOST)
1925 {
1926 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1927 return rc;
1928 }
1929 rc = vmmR3ServiceCallHostRequest(pVM);
1930 if (VBOX_FAILURE(rc))
1931 return rc;
1932 /* Resume GC */
1933 }
1934}
1935
1936
1937/**
1938 * Executes guest code (Intel VMX and AMD SVM).
1939 * @returns VBox status code.
1940 * @param pVM VM handle.
1941 */
1942VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1943{
1944 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1945
1946 for (;;)
1947 {
1948 int rc;
1949 do
1950 {
1951#ifdef NO_SUPCALLR0VMM
1952 rc = VERR_GENERAL_FAILURE;
1953#else
1954 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
1955#endif
1956 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1957
1958#ifdef LOG_ENABLED
1959 /*
1960 * Flush the log
1961 */
1962 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
1963 if ( pR0Logger
1964 && pR0Logger->Logger.offScratch > 0)
1965 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
1966#endif /* LOG_ENABLED */
1967 if (rc != VINF_VMM_CALL_HOST)
1968 {
1969 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1970 return rc;
1971 }
1972 rc = vmmR3ServiceCallHostRequest(pVM);
1973 if (VBOX_FAILURE(rc))
1974 return rc;
1975 /* Resume R0 */
1976 }
1977}
1978
1979/**
1980 * Calls a GC function.
1981 * @returns VBox status code.
1982 * @param pVM The VM handle.
1983 * @param GCPtrEntry The GC function address.
1984 * @param cArgs The number of arguments in the ellipsis (...).
1985 * @param ... Arguments to the function.
1986 */
1987VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
1988{
1989 va_list args;
1990 va_start(args, cArgs);
1991 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
1992 va_end(args);
1993 return rc;
1994}
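
/* Usage sketch, with the entry point resolved via PDMR3GetSymbolGC as done
 * elsewhere in this file ("MyGCFunction", uArg0 and uArg1 are illustrative):
 * @code
 *  RTGCPTR GCPtrEntry;
 *  int rc = PDMR3GetSymbolGC(pVM, NULL, "MyGCFunction", &GCPtrEntry);
 *  if (VBOX_SUCCESS(rc))
 *      rc = VMMR3CallGC(pVM, GCPtrEntry, 2, (RTGCUINTPTR)uArg0, (RTGCUINTPTR)uArg1);
 * @endcode
 */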
1995
1996
1997/**
1998 * Calls a GC function (va_list variant).
1999 * @returns VBox status code.
2000 * @param pVM The VM handle.
2001 * @param GCPtrEntry The GC function address.
2002 * @param cArgs The number of arguments in the argument list.
2003 * @param args Arguments to the function.
2004 */
2005VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
2006{
2007 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
2008
2009 /*
2010 * Setup the call frame using the trampoline.
2011 */
2012 CPUMHyperSetCtxCore(pVM, NULL);
2013 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Poison the stack with 0xaa for debugging. */
2014 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
2015 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
2016 int i = cArgs;
2017 while (i-- > 0)
2018 *pFrame++ = va_arg(args, RTGCUINTPTR);
2019
2020 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
2021 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
2022 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2023
2024 /*
2025 * We hide log flushes (outer) and hypervisor interrupts (inner).
2026 */
2027 for (;;)
2028 {
2029 int rc;
2030 do
2031 {
2032#ifdef NO_SUPCALLR0VMM
2033 rc = VERR_GENERAL_FAILURE;
2034#else
2035 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2036#endif
2037 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2038
2039 /*
2040 * Flush the logs.
2041 */
2042#ifdef LOG_ENABLED
2043 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2044 if ( pLogger
2045 && pLogger->offScratch > 0)
2046 RTLogFlushGC(NULL, pLogger);
2047#endif
2048#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2049 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2050 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2051 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2052#endif
2053 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2054 VMMR3FatalDump(pVM, rc);
2055 if (rc != VINF_VMM_CALL_HOST)
2056 {
2057 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2058 return rc;
2059 }
2060 rc = vmmR3ServiceCallHostRequest(pVM);
2061 if (VBOX_FAILURE(rc))
2062 return rc;
2063 }
2064}
2065
2066
2067/**
2068 * Resumes executing hypervisor code when interrupted
2069 * by a queue flush or a debug event.
2070 *
2071 * @returns VBox status code.
2072 * @param pVM VM handle.
2073 */
2074VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2075{
2076 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2077
2078 /*
2079 * We hide log flushes (outer) and hypervisor interrupts (inner).
2080 */
2081 for (;;)
2082 {
2083 int rc;
2084 do
2085 {
2086#ifdef NO_SUPCALLR0VMM
2087 rc = VERR_GENERAL_FAILURE;
2088#else
2089 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2090#endif
2091 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2092
2093 /*
2094 * Flush the loggers.
2095 */
2096#ifdef LOG_ENABLED
2097 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2098 if ( pLogger
2099 && pLogger->offScratch > 0)
2100 RTLogFlushGC(NULL, pLogger);
2101#endif
2102#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2103 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2104 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2105 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2106#endif
2107 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2108 VMMR3FatalDump(pVM, rc);
2109 if (rc != VINF_VMM_CALL_HOST)
2110 {
2111 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2112 return rc;
2113 }
2114 rc = vmmR3ServiceCallHostRequest(pVM);
2115 if (VBOX_FAILURE(rc))
2116 return rc;
2117 }
2118}
2119
2120
2121/**
2122 * Service a call to the ring-3 host code.
2123 *
2124 * @returns VBox status code.
2125 * @param pVM VM handle.
2126 * @remark Careful with critsects.
2127 */
2128static int vmmR3ServiceCallHostRequest(PVM pVM)
2129{
2130 switch (pVM->vmm.s.enmCallHostOperation)
2131 {
2132 /*
2133 * Acquire the PDM lock.
2134 */
2135 case VMMCALLHOST_PDM_LOCK:
2136 {
2137 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2138 break;
2139 }
2140
2141 /*
2142 * Flush a PDM queue.
2143 */
2144 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2145 {
2146 PDMR3QueueFlushWorker(pVM, NULL);
2147 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2148 break;
2149 }
2150
2151 /*
2152 * Grow the PGM pool.
2153 */
2154 case VMMCALLHOST_PGM_POOL_GROW:
2155 {
2156 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2157 break;
2158 }
2159
2160 /*
2161 * Acquire the PGM lock.
2162 */
2163 case VMMCALLHOST_PGM_LOCK:
2164 {
2165 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2166 break;
2167 }
2168
2169 /*
2170 * Flush REM handler notifications.
2171 */
2172 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2173 {
2174 REMR3ReplayHandlerNotifications(pVM);
2175 break;
2176 }
2177
2178 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2179 {
2180 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
2181 break;
2182 }
2183
2184 /*
2185 * This is a no-op. We just take this route to avoid unnecessary
2186 * tests in the loops.
2187 */
2188 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2189 break;
2190
2191 /*
2192 * Set the VM error message.
2193 */
2194 case VMMCALLHOST_VM_SET_ERROR:
2195 VMR3SetErrorWorker(pVM);
2196 break;
2197
2198 /*
2199 * Set the VM runtime error message.
2200 */
2201 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2202 VMR3SetRuntimeErrorWorker(pVM);
2203 break;
2204
2205 default:
2206 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2207 return VERR_INTERNAL_ERROR;
2208 }
2209
2210 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2211 return VINF_SUCCESS;
2212}
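
/* For reference, the requesting side (the GC/R0 helper, which lives outside
 * this file) conceptually fills in the VMM state and returns VINF_VMM_CALL_HOST
 * through the switcher; a sketch in terms of the fields used above:
 * @code
 *  pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_PDM_LOCK;
 *  pVM->vmm.s.u64CallHostArg       = 0;
 *  // ... world switch to ring-3, which runs vmmR3ServiceCallHostRequest() ...
 *  rc = pVM->vmm.s.rcCallHost;
 * @endcode
 */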
2213
2214
2215
2216/**
2217 * Structure to pass to DBGFR3Info() and for doing all other
2218 * output during fatal dump.
2219 */
2220typedef struct VMMR3FATALDUMPINFOHLP
2221{
2222 /** The helper core. */
2223 DBGFINFOHLP Core;
2224 /** The release logger instance. */
2225 PRTLOGGER pRelLogger;
2226 /** The saved release logger flags. */
2227 RTUINT fRelLoggerFlags;
2228 /** The logger instance. */
2229 PRTLOGGER pLogger;
2230 /** The saved logger flags. */
2231 RTUINT fLoggerFlags;
2232 /** The saved logger destination flags. */
2233 RTUINT fLoggerDestFlags;
2234 /** Whether to output to stderr or not. */
2235 bool fStdErr;
2236} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2237typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2238
2239
2240/**
2241 * Print formatted string.
2242 *
2243 * @param pHlp Pointer to this structure.
2244 * @param pszFormat The format string.
2245 * @param ... Arguments.
2246 */
2247static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2248{
2249 va_list args;
2250 va_start(args, pszFormat);
2251 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2252 va_end(args);
2253}
2254
2255
2256/**
2257 * Print formatted string.
2258 *
2259 * @param pHlp Pointer to this structure.
2260 * @param pszFormat The format string.
2261 * @param args Argument list.
2262 */
2263static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2264{
2265 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2266
2267 if (pMyHlp->pRelLogger)
2268 {
2269 va_list args2;
2270 va_copy(args2, args);
2271 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2272 va_end(args2);
2273 }
2274 if (pMyHlp->pLogger)
2275 {
2276 va_list args2;
2277 va_copy(args2, args);
2278 RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2); /* use the va_copy, not the caller's list */
2279 va_end(args2);
2280 }
2281 if (pMyHlp->fStdErr)
2282 {
2283 va_list args2;
2284 va_copy(args2, args);
2285 RTStrmPrintfV(g_pStdErr, pszFormat, args2); /* use the va_copy, not the caller's list */
2286 va_end(args2);
2287 }
2288}
2289
2290
2291/**
2292 * Initializes the fatal dump output helper.
2293 *
2294 * @param pHlp The structure to initialize.
2295 */
2296static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2297{
2298 memset(pHlp, 0, sizeof(*pHlp));
2299
2300 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2301 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2302
2303 /*
2304 * The loggers.
2305 */
2306 pHlp->pRelLogger = RTLogRelDefaultInstance();
2307#ifndef LOG_ENABLED
2308 if (!pHlp->pRelLogger)
2309#endif
2310 pHlp->pLogger = RTLogDefaultInstance();
2311
2312 if (pHlp->pRelLogger)
2313 {
2314 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2315 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2316 }
2317
2318 if (pHlp->pLogger)
2319 {
2320 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2321 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2322 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2323#ifndef DEBUG_sandervl
2324 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2325#endif
2326 }
2327
2328 /*
2329 * Check if we need to write to stderr.
2330 */
2331 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2332 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2333}
2334
2335
2336/**
2337 * Deletes the fatal dump output helper.
2338 *
2339 * @param pHlp The structure to delete.
2340 */
2341static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2342{
2343 if (pHlp->pRelLogger)
2344 {
2345 RTLogFlush(pHlp->pRelLogger);
2346 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2347 }
2348
2349 if (pHlp->pLogger)
2350 {
2351 RTLogFlush(pHlp->pLogger);
2352 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2353 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2354 }
2355}
2356
2357
2358/**
2359 * Dumps the VM state on a fatal error.
2360 *
2361 * @param pVM VM Handle.
2362 * @param rcErr VBox status code.
2363 */
2364VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2365{
2366 /*
2367 * Create our output helper and sync it with the log settings.
2368 * This helper will be used for all the output.
2369 */
2370 VMMR3FATALDUMPINFOHLP Hlp;
2371 PCDBGFINFOHLP pHlp = &Hlp.Core;
2372 vmmR3FatalDumpInfoHlpInit(&Hlp);
2373
2374 /*
2375 * Header.
2376 */
2377 pHlp->pfnPrintf(pHlp,
2378 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2379 "!!\n"
2380 "!! Guru Meditation %d (%Vrc)\n"
2381 "!!\n",
2382 rcErr, rcErr);
2383
2384 /*
2385 * Continue according to context.
2386 */
2387 bool fDoneHyper = false;
2388 switch (rcErr)
2389 {
2390 /*
2391 * Hypervisor errors.
2392 */
2393 case VINF_EM_DBG_HYPER_ASSERTION:
2394 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2395 /* fall thru */
2396 case VERR_TRPM_DONT_PANIC:
2397 case VERR_TRPM_PANIC:
2398 case VINF_EM_RAW_STALE_SELECTOR:
2399 case VINF_EM_RAW_IRET_TRAP:
2400 case VINF_EM_DBG_HYPER_BREAKPOINT:
2401 case VINF_EM_DBG_HYPER_STEPPED:
2402 {
2403 /* Trap? */
2404 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2405 TRPMEVENT enmType;
2406 uint8_t u8TrapNo = 0xce;
2407 RTGCUINT uErrorCode = 0xdeadface;
2408 RTGCUINTPTR uCR2 = 0xdeadface;
2409 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
2410 if (VBOX_SUCCESS(rc2))
2411 pHlp->pfnPrintf(pHlp,
2412 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv Type=%d\n",
2413 u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
2414 else
2415 pHlp->pfnPrintf(pHlp,
2416 "!! EIP=%VGv NOTRAP\n",
2417 uEIP);
2418
2419 /*
2420 * Try to figure out where EIP is.
2421 */
2422 /** @todo make query call for core code or move this function to VMM. */
2423 /* core code? */
2424 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2425 // pHlp->pfnPrintf(pHlp,
2426 // "!! EIP is in CoreCode, offset %#x\n",
2427 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2428 //else
2429 { /* ask PDM */
2430 /** @todo ask DBGFR3Sym later. */
2431 char szModName[64];
2432 RTGCPTR GCPtrMod;
2433 char szNearSym1[260];
2434 RTGCPTR GCPtrNearSym1;
2435 char szNearSym2[260];
2436 RTGCPTR GCPtrNearSym2;
2437 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2438 &szModName[0], sizeof(szModName), &GCPtrMod,
2439 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2440 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2441 if (VBOX_SUCCESS(rc))
2442 {
2443 pHlp->pfnPrintf(pHlp,
2444 "!! EIP in %s (%p) at rva %x near symbols:\n"
2445 "!! %VGv rva %VGv off %08x %s\n"
2446 "!! %VGv rva %VGv off -%08x %s\n",
2447 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2448 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2449 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2450 }
2451 else
2452 pHlp->pfnPrintf(pHlp,
2453 "!! EIP is not in any code known to VMM!\n");
2454 }
2455
2456 /* Disassemble the instruction. */
2457 char szInstr[256];
2458 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2459 if (VBOX_SUCCESS(rc2))
2460 pHlp->pfnPrintf(pHlp,
2461 "!! %s\n", szInstr);
2462
2463 /* Dump the hypervisor cpu state. */
2464 pHlp->pfnPrintf(pHlp,
2465 "!!\n"
2466 "!!\n"
2467 "!!\n");
2468 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2469 fDoneHyper = true;
2470
2471 /* Callstack. */
2472 DBGFSTACKFRAME Frame = {0};
2473 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2474 if (VBOX_SUCCESS(rc2))
2475 {
2476 pHlp->pfnPrintf(pHlp,
2477 "!!\n"
2478 "!! Call Stack:\n"
2479 "!!\n"
2480 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2481 do
2482 {
2483 pHlp->pfnPrintf(pHlp,
2484 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2485 (uint32_t)Frame.AddrFrame.off,
2486 (uint32_t)Frame.AddrReturnFrame.off,
2487 (uint32_t)Frame.AddrReturnPC.Sel,
2488 (uint32_t)Frame.AddrReturnPC.off,
2489 Frame.Args.au32[0],
2490 Frame.Args.au32[1],
2491 Frame.Args.au32[2],
2492 Frame.Args.au32[3]);
2493 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2494 if (Frame.pSymPC)
2495 {
2496 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2497 if (offDisp > 0)
2498 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2499 else if (offDisp < 0)
2500 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2501 else
2502 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2503 }
2504 if (Frame.pLinePC)
2505 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2506 pHlp->pfnPrintf(pHlp, "\n");
2507
2508 /* next */
2509 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2510 } while (VBOX_SUCCESS(rc2));
2511 DBGFR3StackWalkEnd(pVM, &Frame);
2512 }
2513
2514 /* raw stack */
2515 pHlp->pfnPrintf(pHlp,
2516 "!!\n"
2517 "!! Raw stack (mind the direction).\n"
2518 "!!\n"
2519 "%.*Vhxd\n",
2520 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2521 break;
2522 }
2523
2524 default:
2525 {
2526 break;
2527 }
2528
2529 } /* switch (rcErr) */
2530
2531
2532 /*
2533 * Dump useful state information.
2534 */
2535 /** @todo convert these dumpers to DBGFR3Info() handlers!!! */
2536 pHlp->pfnPrintf(pHlp,
2537 "!!\n"
2538 "!! PGM Access Handlers & Stuff:\n"
2539 "!!\n");
2540 PGMR3DumpMappings(pVM);
2541
2542
2543 /*
2544 * Generic info dumper loop.
2545 */
2546 static struct
2547 {
2548 const char *pszInfo;
2549 const char *pszArgs;
2550 } const aInfo[] =
2551 {
2552 { "hma", NULL },
2553 { "cpumguest", "verbose" },
2554 { "cpumhyper", "verbose" },
2555 { "cpumhost", "verbose" },
2556 { "mode", "all" },
2557 { "cpuid", "verbose" },
2558 { "gdt", NULL },
2559 { "ldt", NULL },
2560 //{ "tss", NULL },
2561 { "ioport", NULL },
2562 { "mmio", NULL },
2563 { "phys", NULL },
2564 //{ "pgmpd", NULL }, - doesn't always work at init time...
2565 { "timers", NULL },
2566 { "activetimers", NULL },
2567 { "handlers", "phys virt stats" },
2568 { "cfgm", NULL },
2569 };
2570 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2571 {
2572 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2573 continue;
2574 pHlp->pfnPrintf(pHlp,
2575 "!!\n"
2576 "!! {%s, %s}\n"
2577 "!!\n",
2578 aInfo[i].pszInfo, aInfo[i].pszArgs);
2579 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2580 }
2581
2582 /* done */
2583 pHlp->pfnPrintf(pHlp,
2584 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2585
2586
2587 /*
2588 * Delete the output instance (flushing and restoring of flags).
2589 */
2590 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2591}
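
/* Usage sketch: VMMR3FatalDump() is invoked on fatal GC statuses, exactly as
 * VMMR3CallGCV() and VMMR3ResumeHyper() do above:
 * @code
 *  if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
 *      VMMR3FatalDump(pVM, rc);
 * @endcode
 */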
2592