VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@7

Last change on this file since revision 7 was revision 1, checked in by vboxsync:

import

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 105.0 KB
1/** @file
2 *
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22#if defined(__AMD64__) && !defined(__WIN__)
23# define NO_SUPCALLR0VMM
24#endif
25
26/** @page pg_vmm VMM - The Virtual Machine Monitor
27 *
28 * !Revise this! It's already incorrect!
29 *
30 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
31 * manages the alternate reality: controlling the virtualization, managing
32 * resources, tracking CPU state, its resources and so on...
33 *
34 * We will split the VMM into smaller entities:
35 *
36 * - Virtual Machine Core Monitor (VMCM), whose purpose is to
37 * provide ring and world switching, including routing
38 * interrupts to the host OS and traps to the appropriate trap
39 * handlers. It will implement an external interface for
40 * managing trap handlers.
41 *
42 * - CPU Monitor (CM), tracking the state of the CPU (in the alternate
43 * reality) and implementing external interfaces to read and change
44 * the state.
45 *
46 * - Memory Monitor (MM), whose purpose is to virtualize physical
47 * pages, segment descriptor tables, interrupt descriptor tables, task
48 * segments, and keep track of all memory providing external interfaces
49 * to access content and map pages. (Internally split into smaller entities!)
50 *
51 * - IO Monitor (IOM), which virtualizes in and out I/O operations. It
52 * interacts with the MM to implement memory mapped I/O. External
53 * interfaces for adding and removing I/O ranges are implemented.
54 *
55 * - External Interrupt Monitor (EIM), whose purpose is to manage
56 * interrupts generated by virtual devices. This monitor provides
57 * an interface for raising interrupts which is accessible at any
58 * time and from all threads.
59 * <p>
60 * A subentity of the EIM is the virtual Programmable Interrupt
61 * Controller Device (VPICD), and perhaps a virtual I/O Advanced
62 * Programmable Interrupt Controller Device (VAPICD).
63 *
64 * - Direct Memory Access Monitor (DMAM), whose purpose is to support
65 * virtual devices using the DMA controller. Like the EIM interfaces,
66 * these must be independent and threadable.
67 * <p>
68 * A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
69 *
70 *
71 * Entities working on a higher level:
72 *
73 * - Device Manager (DM), which is a support facility for virtualized
74 * hardware. This provides generic facilities for efficient device
75 * virtualization. It will manage device attaching and detaching,
76 * conversing with EIM and IOM.
77 *
78 * - Debugger Facility (DBGF) provides the basic features for
79 * debugging the alternate reality execution.
80 *
81 *
82 *
83 * @section pg_vmm_s_use_cases Use Cases
84 *
85 * @subsection pg_vmm_s_use_case_boot Bootstrap
86 *
87 * - Basic Init:
88 * - Init SUPDRV.
89 *
90 * - Init Virtual Machine Instance:
91 * - Load settings.
92 * - Check resource requirements (memory, com, stuff).
93 *
94 * - Init Host Ring 3 part:
95 * - Init Core code.
96 * - Load Pluggable Components.
97 * - Init Pluggable Components.
98 *
99 * - Init Host Ring 0 part:
100 * - Load Core (core = core components like VMM, RMI, CA, and so on) code.
101 * - Init Core code.
102 * - Load Pluggable Component code.
103 * - Init Pluggable Component code.
104 *
105 * - Allocate first chunk of memory and pin it down. This block of memory
106 * will fit the following pieces:
107 * - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
108 * (This is available from everywhere (at different addresses though)).
109 * - VMM Guest Context code.
110 * - Pluggable devices Guest Context code.
111 * - Page tables (directory and everything) for the VMM Guest
112 *
113 * - Setup Guest (Ring 0) part:
114 * - Setup initial page tables (i.e. directory all the stuff).
115 * - Load Core Guest Context code.
116 * - Load Pluggable Devices Guest Context code.
117 *
118 *
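 * @subsection pg_vmm_s_use_case_boot_sketch Init order sketch
 *
 * A minimal sketch of the order in which the init entry points in this file
 * are driven; the surrounding VM creation and the init of the other
 * components are assumed and omitted here:
 *
 * @code
 *     int rc = VMMR3Init(pVM);           // ring-3: core code, stack, loggers, statistics
 *     if (VBOX_SUCCESS(rc))
 *         rc = VMMR3InitFinalize(pVM);   // on ring-3 init completion: stack protection, yield timer
 *     if (VBOX_SUCCESS(rc))
 *         rc = VMMR3InitR0(pVM);         // ring-0 side
 *     if (VBOX_SUCCESS(rc))
 *         rc = VMMR3InitGC(pVM);         // guest context side (skipped if the switchers are disabled)
 *     ...
 *     VMMR3Term(pVM);                    // teardown
 * @endcode
 *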
119 */
120
121
122/*******************************************************************************
123* Header Files *
124*******************************************************************************/
125#define LOG_GROUP LOG_GROUP_VMM
126#include <VBox/vmm.h>
127#include <VBox/vmapi.h>
128#include <VBox/pgm.h>
129#include <VBox/cfgm.h>
130#include <VBox/pdm.h>
131#include <VBox/cpum.h>
132#include <VBox/mm.h>
133#include <VBox/iom.h>
134#include <VBox/trpm.h>
135#include <VBox/selm.h>
136#include <VBox/em.h>
137#include <VBox/sup.h>
138#include <VBox/dbgf.h>
139#include <VBox/csam.h>
140#include <VBox/patm.h>
141#include <VBox/rem.h>
142#include <VBox/ssm.h>
143#include <VBox/tm.h>
144#include "VMMInternal.h"
145#include "VMMSwitcher/VMMSwitcher.h"
146#include <VBox/vm.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/version.h>
150#include <VBox/x86.h>
151#include <iprt/assert.h>
152#include <iprt/alloc.h>
153#include <iprt/asm.h>
154#include <iprt/time.h>
155#include <iprt/stream.h>
156#include <iprt/string.h>
157#include <iprt/stdarg.h>
158#include <iprt/ctype.h>
159
160
161
162/** The saved state version. */
163#define VMM_SAVED_STATE_VERSION 3
164
165
166/*******************************************************************************
167* Internal Functions *
168*******************************************************************************/
169static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
170static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
171static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
172static int vmmR3ServiceCallHostRequest(PVM pVM);
173
174
175/*******************************************************************************
176* Global Variables *
177*******************************************************************************/
178/** Array of switcher definitions.
179 * The type and index shall match!
180 */
181static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
182{
183 NULL, /* invalid entry */
184#ifndef __AMD64__
185 &vmmR3Switcher32BitTo32Bit_Def,
186 &vmmR3Switcher32BitToPAE_Def,
187 NULL, //&vmmR3Switcher32BitToAMD64_Def,
188 &vmmR3SwitcherPAETo32Bit_Def,
189 &vmmR3SwitcherPAEToPAE_Def,
190 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
191 NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
192 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
193#else
194 NULL, //&vmmR3Switcher32BitTo32Bit_Def,
195 NULL, //&vmmR3Switcher32BitToPAE_Def,
196 NULL, //&vmmR3Switcher32BitToAMD64_Def,
197 NULL, //&vmmR3SwitcherPAETo32Bit_Def,
198 NULL, //&vmmR3SwitcherPAEToPAE_Def,
199 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
200 &vmmR3SwitcherAMD64ToPAE_Def,
201 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
202#endif
203};
204
205
206
207/**
208 * Initializes the core code.
209 *
210 * This is core per-VM code which might need fixups and/or, for ease of
211 * use, is put on linear contiguous backing.
212 *
213 * @returns VBox status code.
214 * @param pVM Pointer to VM structure.
215 */
216static int vmmR3InitCoreCode(PVM pVM)
217{
218 /*
219 * Calc the size.
220 */
221 unsigned cbCoreCode = 0;
222 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
223 {
224 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
225 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
226 if (pSwitcher)
227 {
228 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
229 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
230 }
231 }
232
233 /*
234 * Allocate contiguous pages for switchers and deal with
235 * conflicts in the intermediate mapping of the code.
236 */
237 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
238 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
239 int rc = VERR_NO_MEMORY;
240 if (pVM->vmm.s.pvHCCoreCodeR3)
241 {
242 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
243 if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
244 {
245 /* try more allocations. */
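            /*
             * Keep each conflicting allocation around (in aBadTries) so the
             * allocator cannot hand back the very same range; the bad tries
             * are logged and freed once the loop is done.
             */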
246 struct
247 {
248 void *pvR0;
249 void *pvR3;
250 RTHCPHYS HCPhys;
251 } aBadTries[16];
252 unsigned i = 0;
253 do
254 {
255 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
256 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
257 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
258 i++;
259 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
260 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
261 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
262 if (!pVM->vmm.s.pvHCCoreCodeR3)
263 break;
264 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
265 } while ( rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
266 && i < ELEMENTS(aBadTries) - 1);
267
268 /* cleanup */
269 if (VBOX_FAILURE(rc))
270 {
271 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
272 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
273 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
274 i++;
275 LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
276 }
277 while (i-- > 0)
278 {
279 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
280 i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
281 SUPContFree(aBadTries[i].pvR3);
282 }
283 }
284 }
285 if (VBOX_SUCCESS(rc))
286 {
287 /*
288 * copy the code.
289 */
290 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
291 {
292 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
293 if (pSwitcher)
294 memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
295 pSwitcher->pvCode, pSwitcher->cbCode);
296 }
297
298 /*
299 * Map the code into the GC address space.
300 */
301 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
302 if (VBOX_SUCCESS(rc))
303 {
304 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
305 /*
306 * Finally, PGM has probably selected a switcher already, but we need
307 * to get the addresses, so we'll reselect it.
308 * This may legally fail, so we're ignoring the rc.
309 */
310 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
311 return rc;
312 }
313
314 /* Bail out. */
315 AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
316 SUPContFree(pVM->vmm.s.pvHCCoreCodeR3);
317 }
318 else
319 VMSetError(pVM, rc, RT_SRC_POS,
320 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code."),
321 cbCoreCode);
322
323 pVM->vmm.s.pvHCCoreCodeR3 = NULL;
324 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
325 pVM->vmm.s.pvGCCoreCode = 0;
326 return rc;
327}
328
329
330/**
331 * Initializes the VMM.
332 *
333 * @returns VBox status code.
334 * @param pVM The VM to operate on.
335 */
336VMMR3DECL(int) VMMR3Init(PVM pVM)
337{
338 LogFlow(("VMMR3Init\n"));
339
340 /*
341 * Assert alignment, sizes and order.
342 */
343 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
344 AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
345 ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
346 sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
347
348 /*
349 * Init basic VM VMM members.
350 */
351 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
352 int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
353 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
354 pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
355 //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
356 else
357 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
358
359 /*
360 * Register the saved state data unit.
361 */
362 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
363 NULL, vmmR3Save, NULL,
364 NULL, vmmR3Load, NULL);
365 if (VBOX_FAILURE(rc))
366 return rc;
367
368 /* GC switchers are enabled by default. Turned off by HWACCM. */
369 pVM->vmm.s.fSwitcherDisabled = false;
370
371 /*
372 * Init core code.
373 */
374 rc = vmmR3InitCoreCode(pVM);
375 if (VBOX_SUCCESS(rc))
376 {
377 /*
378 * Allocate & init VMM GC stack.
379 * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
380 * (The page protection is modified during R3 init completion.)
381 */
382#ifdef VBOX_STRICT_VMM_STACK
383 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
384#else
385 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
386#endif
387 if (VBOX_SUCCESS(rc))
388 {
389 /* Set HC and GC stack pointers to top of stack. */
390 pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = pVM->vmm.s.pbHCStack;
391 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
392 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
393 AssertRelease(pVM->vmm.s.pbGCStack);
394
395 /* Set hypervisor eip. */
396 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);
397
398 /*
399 * Allocate GC & R0 Logger instances (they are finalized in the relocator).
400 */
401#ifdef LOG_ENABLED
402 PRTLOGGER pLogger = RTLogDefaultInstance();
403 if (pLogger)
404 {
405 pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]);
406 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
407 if (VBOX_SUCCESS(rc))
408 {
409 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
410
411/*
412 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
413 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
414 *
415 * If you want to log in non-debug modes, you'll have to remember to change SUPDRvShared.c
416 * to not stub all the log functions.
417 */
418# ifdef DEBUG_sandervl
419 rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
420 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
421 if (VBOX_SUCCESS(rc))
422 {
423 pVM->vmm.s.pR0Logger->pVM = pVM;
424 //pVM->vmm.s.pR0Logger->fCreated = false;
425 pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
426 }
427# endif
428 }
429 }
430#endif /* LOG_ENABLED */
431
432#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
433 /*
434 * Allocate GC Release Logger instances (finalized in the relocator).
435 */
436 if (VBOX_SUCCESS(rc))
437 {
438 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
439 if (pRelLogger)
440 {
441 pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]);
442 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
443 if (VBOX_SUCCESS(rc))
444 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
445 }
446 }
447#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
448
449#ifdef VBOX_WITH_NMI
450 /*
451 * Allocate mapping for the host APIC.
452 */
453 if (VBOX_SUCCESS(rc))
454 {
455 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
456 AssertRC(rc);
457 }
458#endif
459 if (VBOX_SUCCESS(rc))
460 {
461 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
462 if (VBOX_SUCCESS(rc))
463 {
464 /*
465 * Statistics.
466 */
467 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
468 STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
469 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
470 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
471 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
472 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
473 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
474 STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
475 STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
476 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
477 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
478 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
479 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
480 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
481 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READWRITE returns.");
482 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
483 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
484 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
485 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
486 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
487 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
488 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
489 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
490 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
491 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
492 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
493 STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
494 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
495 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
496 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
497 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
498 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
499 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
500 STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
508 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
509 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
510 STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
511 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
512 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
513 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
514 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
515 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
516
517 return VINF_SUCCESS;
518 }
519 AssertRC(rc);
520 }
521 }
522 /** @todo: Need failure cleanup. */
523
524 //more todo in here?
525 //if (VBOX_SUCCESS(rc))
526 //{
527 //}
528 //int rc2 = vmmR3TermCoreCode(pVM);
529 //AssertRC(rc2));
530 }
531
532 return rc;
533}
534
535
536/**
537 * Ring-3 init finalizing.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM handle.
541 */
542VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
543{
544#ifdef VBOX_STRICT_VMM_STACK
545 /*
546 * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
547 */
548 memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
549 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
550 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
551
552 memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
553 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
554 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
555#endif
556
557 /*
558 * Set page attributes to r/w for stack pages.
559 */
560 int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
561 AssertRC(rc);
562 if (VBOX_SUCCESS(rc))
563 {
564 /*
565 * Create the EMT yield timer.
566 */
567 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
568 if (VBOX_SUCCESS(rc))
569 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
570 }
571#ifdef VBOX_WITH_NMI
572 /*
573 * Map the host APIC into GC - This may be host OS specific!
574 */
575 if (VBOX_SUCCESS(rc))
576 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
577 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
578#endif
579 return rc;
580}
581
582
583/**
584 * Initializes the R0 VMM.
585 *
586 * @returns VBox status code.
587 * @param pVM The VM to operate on.
588 */
589VMMR3DECL(int) VMMR3InitR0(PVM pVM)
590{
591 int rc;
592
593 /*
594 * Initialize the ring-0 logger if we haven't done so yet.
595 */
596 if ( pVM->vmm.s.pR0Logger
597 && !pVM->vmm.s.pR0Logger->fCreated)
598 {
599 rc = VMMR3UpdateLoggers(pVM);
600 if (VBOX_FAILURE(rc))
601 return rc;
602 }
603
604 /*
605 * Call Ring-0 entry with init code.
606 */
607 for (;;)
608 {
609#ifdef NO_SUPCALLR0VMM
610 //rc = VERR_GENERAL_FAILURE;
611 rc = VINF_SUCCESS;
612#else
613 rc = SUPCallVMMR0(pVM, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION);
614#endif
615 if ( pVM->vmm.s.pR0Logger
616 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
617 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
618 if (rc != VINF_VMM_CALL_HOST)
619 break;
620 rc = vmmR3ServiceCallHostRequest(pVM);
621 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
622 break;
623 break; // remove this when we do setjmp for all ring-0 stuff.
624 }
625
626 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
627 {
628 LogRel(("R0 init failed, rc=%Vra\n", rc));
629 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
630 rc = VERR_INTERNAL_ERROR;
631 }
632 return rc;
633}
634
635
636/**
637 * Initializes the GC VMM.
638 *
639 * @returns VBox status code.
640 * @param pVM The VM to operate on.
641 */
642VMMR3DECL(int) VMMR3InitGC(PVM pVM)
643{
644 /* In VMX mode, there's no need to init GC. */
645 if (pVM->vmm.s.fSwitcherDisabled)
646 return VINF_SUCCESS;
647
648 /*
649 * Call VMMGCInit():
650 * -# resolve the address.
651 * -# setup stackframe and EIP to use the trampoline.
652 * -# do a generic hypervisor call.
653 */
654 RTGCPTR GCPtrEP;
655 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
656 if (VBOX_SUCCESS(rc))
657 {
658 CPUMHyperSetCtxCore(pVM, NULL);
659 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
660 CPUMPushHyper(pVM, VBOX_VERSION); /* Param 2: Version argument. */
661 CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
662 CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */
663 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */
664 CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */
665 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
666
667 for (;;)
668 {
669#ifdef NO_SUPCALLR0VMM
670 //rc = VERR_GENERAL_FAILURE;
671 rc = VINF_SUCCESS;
672#else
673 rc = SUPCallVMMR0(pVM, VMMR0_DO_CALL_HYPERVISOR, NULL);
674#endif
675#ifdef LOG_ENABLED
676 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
677 if ( pLogger
678 && pLogger->offScratch > 0)
679 RTLogFlushGC(NULL, pLogger);
680#endif
681#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
682 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
683 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
684 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
685#endif
686 if (rc != VINF_VMM_CALL_HOST)
687 break;
688 rc = vmmR3ServiceCallHostRequest(pVM);
689 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
690 break;
691 }
692
693 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
694 {
695 VMMR3FatalDump(pVM, rc);
696 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
697 rc = VERR_INTERNAL_ERROR;
698 }
699 AssertRC(rc);
700 }
701 return rc;
702}
703
704
705/**
706 * Terminate the VMM bits.
707 *
708 * @returns VINF_SUCCESS.
709 * @param pVM The VM handle.
710 */
711VMMR3DECL(int) VMMR3Term(PVM pVM)
712{
713 /** @todo must call ring-0 so the logger thread instance can be properly removed. */
714
715#ifdef VBOX_STRICT_VMM_STACK
716 /*
717 * Make the two stack guard pages present again.
718 */
719 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
720 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
721#endif
722 return VINF_SUCCESS;
723}
724
725
726/**
727 * Applies relocations to data and code managed by this
728 * component. This function will be called at init and
729 * whenever the VMM needs to relocate itself inside the GC.
730 *
731 * The VMM will need to apply relocations to the core code.
732 *
733 * @param pVM The VM handle.
734 * @param offDelta The relocation delta.
735 */
736VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
737{
738 LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
739
740 /*
741 * Recalc the GC address.
742 */
743 pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);
744
745 /*
746 * The stack.
747 */
748 CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
749 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
750 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
751
752 /*
753 * All the switchers.
754 */
755 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
756 {
757 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
758 if (pSwitcher && pSwitcher->pfnRelocate)
759 {
760 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
761 pSwitcher->pfnRelocate(pVM,
762 pSwitcher,
763 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
764 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
765 pVM->vmm.s.pvGCCoreCode + off,
766 pVM->vmm.s.HCPhysCoreCode + off);
767 }
768 }
769
770 /*
771 * Recalc the GC address for the current switcher.
772 */
773 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
774 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
775 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
776 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
777 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
778 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
779 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
780
781 /*
782 * Get other GC entry points.
783 */
784 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
785 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
786
787 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
788 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
789
790 /*
791 * Update the logger.
792 */
793 VMMR3UpdateLoggers(pVM);
794}
795
796
797/**
798 * Updates the settings for the GC and R0 loggers.
799 *
800 * @returns VBox status code.
801 * @param pVM The VM handle.
802 */
803VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
804{
805 /*
806 * Simply clone the logger instance (for GC).
807 */
808 int rc = VINF_SUCCESS;
809 RTGCPTR GCPtrLoggerFlush = 0;
810
811 if (pVM->vmm.s.pLoggerHC
812#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
813 || pVM->vmm.s.pRelLoggerHC
814#endif
815 )
816 {
817 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
818 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
819 }
820
821 if (pVM->vmm.s.pLoggerHC)
822 {
823 RTGCPTR GCPtrLoggerWrapper = 0;
824 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
825 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
826 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
827 rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
828 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
829 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
830 }
831
832#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
833 if (pVM->vmm.s.pRelLoggerHC)
834 {
835 RTGCPTR GCPtrLoggerWrapper = 0;
836 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
837 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
838 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
839 rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
840 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
841 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
842 }
843#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
844
845 /*
846 * For the ring-0 EMT logger, we use a per-thread logger
847 * instance in ring-0. Only initialize it once.
848 */
849 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
850 if (pR0Logger)
851 {
852 if (!pR0Logger->fCreated)
853 {
854 RTHCPTR pfnLoggerWrapper = NULL;
855 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
856 AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Vra\n", rc), rc);
857
858 RTHCPTR pfnLoggerFlush = NULL;
859 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
860 AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc);
861
862 rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
863 *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
864 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
865 AssertReleaseMsgRCReturn(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc), rc);
866 pR0Logger->fCreated = true;
867 }
868
869 rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
870 AssertRC(rc);
871 }
872
873 return rc;
874}
875
876
877/**
878 * Generic switch code relocator.
879 *
880 * @param pVM The VM handle.
881 * @param pSwitcher The switcher definition.
882 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
883 * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping.
884 * @param GCPtrCode The guest context address corresponding to pu8Code.
885 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
886 * @param SelCS The hypervisor CS selector.
887 * @param SelDS The hypervisor DS selector.
888 * @param SelTSS The hypervisor TSS selector.
889 * @param GCPtrGDT The GC address of the hypervisor GDT.
890 * @param SelCS64 The 64-bit mode hypervisor CS selector.
891 */
892static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
893 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
894{
895 union
896 {
897 const uint8_t *pu8;
898 const uint16_t *pu16;
899 const uint32_t *pu32;
900 const uint64_t *pu64;
901 const void *pv;
902 uintptr_t u;
903 } u;
904 u.pv = pSwitcher->pvFixups;
905
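    /*
     * The fixup table is a compact byte stream: each record starts with a
     * fixup type byte, followed by a 32-bit source offset into the code and,
     * for most fixup types, a 32-bit target offset or other operand. The
     * stream is terminated by FIX_THE_END.
     */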
906 /*
907 * Process fixups.
908 */
909 uint8_t u8;
910 while ((u8 = *u.pu8++) != FIX_THE_END)
911 {
912 /*
913 * Get the source (where to write the fixup).
914 */
915 uint32_t offSrc = *u.pu32++;
916 Assert(offSrc < pSwitcher->cbCode);
917 union
918 {
919 uint8_t *pu8;
920 uint16_t *pu16;
921 uint32_t *pu32;
922 uint64_t *pu64;
923 uintptr_t u;
924 } uSrc;
925 uSrc.pu8 = pu8CodeR3 + offSrc;
926
927 /* The fixup target and method depend on the type. */
928 switch (u8)
929 {
930 /*
931 * 32-bit relative, source in HC and target in GC.
932 */
933 case FIX_HC_2_GC_NEAR_REL:
934 {
935 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
936 uint32_t offTrg = *u.pu32++;
937 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
938 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
939 break;
940 }
941
942 /*
943 * 32-bit relative, source in HC and target in ID.
944 */
945 case FIX_HC_2_ID_NEAR_REL:
946 {
947 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
948 uint32_t offTrg = *u.pu32++;
949 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
950 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (uSrc.u + 4));
951 break;
952 }
953
954 /*
955 * 32-bit relative, source in GC and target in HC.
956 */
957 case FIX_GC_2_HC_NEAR_REL:
958 {
959 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
960 uint32_t offTrg = *u.pu32++;
961 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
962 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
963 break;
964 }
965
966 /*
967 * 32-bit relative, source in GC and target in ID.
968 */
969 case FIX_GC_2_ID_NEAR_REL:
970 {
971 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
972 uint32_t offTrg = *u.pu32++;
973 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
974 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
975 break;
976 }
977
978 /*
979 * 32-bit relative, source in ID and target in HC.
980 */
981 case FIX_ID_2_HC_NEAR_REL:
982 {
983 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
984 uint32_t offTrg = *u.pu32++;
985 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
986 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
987 break;
988 }
989
990 /*
991 * 32-bit relative, source in ID and target in GC.
992 */
993 case FIX_ID_2_GC_NEAR_REL:
994 {
995 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
996 uint32_t offTrg = *u.pu32++;
997 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
998 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
999 break;
1000 }
1001
1002 /*
1003 * 16:32 far jump, target in GC.
1004 */
1005 case FIX_GC_FAR32:
1006 {
1007 uint32_t offTrg = *u.pu32++;
1008 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1009 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
1010 *uSrc.pu16++ = SelCS;
1011 break;
1012 }
1013
1014 /*
1015 * Make 32-bit GC pointer given CPUM offset.
1016 */
1017 case FIX_GC_CPUM_OFF:
1018 {
1019 uint32_t offCPUM = *u.pu32++;
1020 Assert(offCPUM < sizeof(pVM->cpum));
1021 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
1022 break;
1023 }
1024
1025 /*
1026 * Make 32-bit GC pointer given VM offset.
1027 */
1028 case FIX_GC_VM_OFF:
1029 {
1030 uint32_t offVM = *u.pu32++;
1031 Assert(offVM < sizeof(VM));
1032 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
1033 break;
1034 }
1035
1036 /*
1037 * Make 32-bit HC pointer given CPUM offset.
1038 */
1039 case FIX_HC_CPUM_OFF:
1040 {
1041 uint32_t offCPUM = *u.pu32++;
1042 Assert(offCPUM < sizeof(pVM->cpum));
1043 *uSrc.pu32 = (uint32_t)((uintptr_t)&pVM->cpum + offCPUM);
1044 break;
1045 }
1046
1047 /*
1048 * Make 32-bit HC pointer given VM offset.
1049 */
1050 case FIX_HC_VM_OFF:
1051 {
1052 uint32_t offVM = *u.pu32++;
1053 Assert(offVM < sizeof(VM));
1054 *uSrc.pu32 = (uint32_t)(uintptr_t)pVM + offVM;
1055 break;
1056 }
1057
1058 /*
1059 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
1060 */
1061 case FIX_INTER_32BIT_CR3:
1062 {
1063
1064 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
1065 break;
1066 }
1067
1068 /*
1069 * Store the PAE CR3 (32-bit) for the intermediate memory context.
1070 */
1071 case FIX_INTER_PAE_CR3:
1072 {
1073
1074 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
1075 break;
1076 }
1077
1078 /*
1079 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
1080 */
1081 case FIX_INTER_AMD64_CR3:
1082 {
1083
1084 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
1085 break;
1086 }
1087
1088 /*
1089 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
1090 */
1091 case FIX_HYPER_32BIT_CR3:
1092 {
1093
1094 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
1095 break;
1096 }
1097
1098 /*
1099 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
1100 */
1101 case FIX_HYPER_PAE_CR3:
1102 {
1103
1104 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
1105 break;
1106 }
1107
1108 /*
1109 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
1110 */
1111 case FIX_HYPER_AMD64_CR3:
1112 {
1113
1114 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
1115 break;
1116 }
1117
1118 /*
1119 * Store Hypervisor CS (16-bit).
1120 */
1121 case FIX_HYPER_CS:
1122 {
1123 *uSrc.pu16 = SelCS;
1124 break;
1125 }
1126
1127 /*
1128 * Store Hypervisor DS (16-bit).
1129 */
1130 case FIX_HYPER_DS:
1131 {
1132 *uSrc.pu16 = SelDS;
1133 break;
1134 }
1135
1136 /*
1137 * Store Hypervisor TSS (16-bit).
1138 */
1139 case FIX_HYPER_TSS:
1140 {
1141 *uSrc.pu16 = SelTSS;
1142 break;
1143 }
1144
1145 /*
1146 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
1147 */
1148 case FIX_GC_TSS_GDTE_DW2:
1149 {
1150 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
1151 *uSrc.pu32 = (uint32_t)GCPtr;
1152 break;
1153 }
1154
1155
1156 ///@todo case FIX_CR4_MASK:
1157 ///@todo case FIX_CR4_OSFSXR:
1158
1159 /*
1160 * Insert relative jump to the specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
1161 */
1162 case FIX_NO_FXSAVE_JMP:
1163 {
1164 uint32_t offTrg = *u.pu32++;
1165 Assert(offTrg < pSwitcher->cbCode);
1166 if (!CPUMSupportsFXSR(pVM))
1167 {
1168 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1169 *uSrc.pu32++ = offTrg - (offSrc + 5);
1170 }
1171 else
1172 {
1173 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1174 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1175 }
1176 break;
1177 }
1178
1179 /*
1180 * Insert relative jump to the specified target if SYSENTER isn't used by the host.
1181 */
1182 case FIX_NO_SYSENTER_JMP:
1183 {
1184 uint32_t offTrg = *u.pu32++;
1185 Assert(offTrg < pSwitcher->cbCode);
1186 if (!CPUMIsHostUsingSysEnter(pVM))
1187 {
1188 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1189 *uSrc.pu32++ = offTrg - (offSrc + 5);
1190 }
1191 else
1192 {
1193 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1194 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1195 }
1196 break;
1197 }
1198
1199 /*
1200 * Insert relative jump to the specified target if SYSCALL isn't used by the host.
1201 */
1202 case FIX_NO_SYSCALL_JMP:
1203 {
1204 uint32_t offTrg = *u.pu32++;
1205 Assert(offTrg < pSwitcher->cbCode);
1206 if (!CPUMIsHostUsingSysCall(pVM))
1207 {
1208 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1209 *uSrc.pu32++ = offTrg - (offSrc + 5);
1210 }
1211 else
1212 {
1213 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1214 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1215 }
1216 break;
1217 }
1218
1219#ifdef __AMD64__
1220 /*
1221 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1222 */
1223 case FIX_HC_64BIT:
1224 {
1225 uint32_t offTrg = *u.pu32++;
1226 Assert(offSrc < pSwitcher->cbCode);
1227 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1228 *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
1229 break;
1230 }
1231
1232 /*
1233 * 64-bit HC pointer to the CPUM instance data (no argument).
1234 */
1235 case FIX_HC_64BIT_CPUM:
1236 {
1237 Assert(offSrc < pSwitcher->cbCode);
1238 *uSrc.pu64 = (uintptr_t)&pVM->cpum;
1239 break;
1240 }
1241#endif
1242
1243 /*
1244 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
1245 */
1246 case FIX_ID_32BIT:
1247 {
1248 uint32_t offTrg = *u.pu32++;
1249 Assert(offSrc < pSwitcher->cbCode);
1250 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1251 *uSrc.pu32 = u32IDCode + offTrg;
1252 break;
1253 }
1254
1255 /*
1256 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
1257 */
1258 case FIX_ID_64BIT:
1259 {
1260 uint32_t offTrg = *u.pu32++;
1261 Assert(offSrc < pSwitcher->cbCode);
1262 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1263 *uSrc.pu64 = u32IDCode + offTrg;
1264 break;
1265 }
1266
1267 /*
1268 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
1269 */
1270 case FIX_ID_FAR32_TO_64BIT_MODE:
1271 {
1272 uint32_t offTrg = *u.pu32++;
1273 Assert(offSrc < pSwitcher->cbCode);
1274 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1275 *uSrc.pu32++ = u32IDCode + offTrg;
1276 *uSrc.pu16 = SelCS64;
1277 AssertRelease(SelCS64);
1278 break;
1279 }
1280
1281#ifdef VBOX_WITH_NMI
1282 /*
1283 * 32-bit address to the APIC base.
1284 */
1285 case FIX_GC_APIC_BASE_32BIT:
1286 {
1287 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
1288 break;
1289 }
1290#endif
1291
1292 default:
1293 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
1294 break;
1295 }
1296 }
1297
1298#ifdef LOG_ENABLED
1299 /*
1300 * If Log2 is enabled, disassemble the switcher code.
1301 *
1302 * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
1303 */
1304 if (LogIs2Enabled())
1305 {
1306 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
1307 " pu8CodeR0 = %p\n"
1308 " pu8CodeR3 = %p\n"
1309 " GCPtrCode = %VGv\n"
1310 " u32IDCode = %08x\n"
1311 " pVMGC = %VGv\n"
1312 " pCPUMGC = %VGv\n"
1313 " pVMHC = %p\n"
1314 " pCPUMHC = %p\n"
1315 " GCPtrGDT = %VGv\n"
1316 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1317 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1318 " SelCS = %04x\n"
1319 " SelDS = %04x\n"
1320 " SelCS64 = %04x\n"
1321 " SelTSS = %04x\n",
1322 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
1323 pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
1324 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
1325 GCPtrGDT,
1326 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
1327 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
1328 SelCS, SelDS, SelCS64, SelTSS);
1329
1330 uint32_t offCode = 0;
1331 while (offCode < pSwitcher->cbCode)
1332 {
1333 /*
1334 * Figure out where this is.
1335 */
1336 const char *pszDesc = NULL;
1337 RTUINTPTR uBase;
1338 uint32_t cbCode;
1339 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
1340 {
1341 pszDesc = "HCCode0";
1342 uBase = (RTUINTPTR)pu8CodeR0;
1343 offCode = pSwitcher->offHCCode0;
1344 cbCode = pSwitcher->cbHCCode0;
1345 }
1346 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
1347 {
1348 pszDesc = "HCCode1";
1349 uBase = (RTUINTPTR)pu8CodeR0;
1350 offCode = pSwitcher->offHCCode1;
1351 cbCode = pSwitcher->cbHCCode1;
1352 }
1353 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
1354 {
1355 pszDesc = "GCCode";
1356 uBase = GCPtrCode;
1357 offCode = pSwitcher->offGCCode;
1358 cbCode = pSwitcher->cbGCCode;
1359 }
1360 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
1361 {
1362 pszDesc = "IDCode0";
1363 uBase = u32IDCode;
1364 offCode = pSwitcher->offIDCode0;
1365 cbCode = pSwitcher->cbIDCode0;
1366 }
1367 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
1368 {
1369 pszDesc = "IDCode1";
1370 uBase = u32IDCode;
1371 offCode = pSwitcher->offIDCode1;
1372 cbCode = pSwitcher->cbIDCode1;
1373 }
1374 else
1375 {
1376 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
1377 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1378 offCode++;
1379 continue;
1380 }
1381
1382 /*
1383 * Disassemble it.
1384 */
1385 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
1386 DISCPUSTATE Cpu = {0};
1387 Cpu.mode = CPUMODE_32BIT;
1388 while (cbCode > 0)
1389 {
1390 /* try label it */
1391 if (pSwitcher->offR0HostToGuest == offCode)
1392 RTLogPrintf(" *R0HostToGuest:\n");
1393 if (pSwitcher->offGCGuestToHost == offCode)
1394 RTLogPrintf(" *GCGuestToHost:\n");
1395 if (pSwitcher->offGCCallTrampoline == offCode)
1396 RTLogPrintf(" *GCCallTrampoline:\n");
1397 if (pSwitcher->offGCGuestToHostAsm == offCode)
1398 RTLogPrintf(" *GCGuestToHostAsm:\n");
1399 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
1400 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
1401 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
1402 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
1403
1404 /* disas */
1405 uint32_t cbInstr = 0;
1406 char szDisas[256];
1407 if (DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas))
1408 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
1409 else
1410 {
1411 RTLogPrintf(" %04x: %02x '%c'\n",
1412 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1413 cbInstr = 1;
1414 }
1415 offCode += cbInstr;
1416 cbCode -= RT_MIN(cbInstr, cbCode);
1417 }
1418 }
1419 }
1420#endif
1421}
1422
1423
1424/**
1425 * Relocator for the 32-Bit to 32-Bit world switcher.
1426 */
1427DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1428{
1429 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1430 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1431}
1432
1433
1434/**
1435 * Relocator for the 32-Bit to PAE world switcher.
1436 */
1437DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1438{
1439 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1440 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1441}
1442
1443
1444/**
1445 * Relocator for the PAE to 32-Bit world switcher.
1446 */
1447DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1448{
1449 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1450 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1451}
1452
1453
1454/**
1455 * Relocator for the PAE to PAE world switcher.
1456 */
1457DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1458{
1459 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1460 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1461}
1462
1463
1464/**
1465 * Relocator for the AMD64 to PAE world switcher.
1466 */
1467DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1468{
1469 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1470 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
1471}
1472
1473
1474/**
1475 * Gets the pointer to g_szRTAssertMsg1 in GC.
1476 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
1477 * Returns NULL if not present.
1478 * @param pVM The VM handle.
1479 */
1480VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
1481{
1482 RTGCPTR GCPtr;
1483 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
1484 if (VBOX_SUCCESS(rc))
1485 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1486 return NULL;
1487}
1488
1489
1490/**
1491 * Gets the pointer to g_szRTAssertMsg2 in GC.
1492 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
1493 * Returns NULL if not present.
1494 * @param pVM The VM handle.
1495 */
1496VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
1497{
1498 RTGCPTR GCPtr;
1499 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
1500 if (VBOX_SUCCESS(rc))
1501 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1502 return NULL;
1503}
1504
1505
1506/**
1507 * Execute state save operation.
1508 *
1509 * @returns VBox status code.
1510 * @param pVM VM Handle.
1511 * @param pSSM SSM operation handle.
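 *
 * Saved unit layout (a summary of the code below): the stack bottom GC
 * pointer, the hypervisor ESP, the raw stack bytes (VMM_STACK_SIZE), and
 * a ~0 terminator dword.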
1512 */
1513static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1514{
1515 LogFlow(("vmmR3Save:\n"));
1516
1517 /*
1518 * The hypervisor stack.
1519 */
1520 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1521 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1522 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1523 SSMR3PutGCPtr(pSSM, GCPtrESP);
1524 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1525 return SSMR3PutU32(pSSM, ~0); /* terminator */
1526}
1527
1528
1529/**
1530 * Execute state load operation.
1531 *
1532 * @returns VBox status code.
1533 * @param pVM VM Handle.
1534 * @param pSSM SSM operation handle.
1535 * @param u32Version Data layout version.
1536 */
1537static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1538{
1539 LogFlow(("vmmR3Load:\n"));
1540
1541 /*
1542 * Validate version.
1543 */
1544 if (u32Version != VMM_SAVED_STATE_VERSION)
1545 {
1546 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1547 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1548 }
1549
1550 /*
1551 * Check that the stack is in the same place, or that it's fairly empty.
1552 */
1553 RTGCPTR GCPtrStackBottom;
1554 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1555 RTGCPTR GCPtrESP;
1556 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1557 if (VBOX_FAILURE(rc))
1558 return rc;
1559 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1560 || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1561 {
1562 /*
1563 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1564 * the ESP in CPUM fatally invalid.
1565 */
1566 CPUMSetHyperESP(pVM, GCPtrESP);
1567
1568 /* restore the stack. */
1569 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1570
1571 /* terminator */
1572 uint32_t u32;
1573 rc = SSMR3GetU32(pSSM, &u32);
1574 if (VBOX_FAILURE(rc))
1575 return rc;
1576 if (u32 != ~0U)
1577 {
1578 AssertMsgFailed(("u32=%#x\n", u32));
1579 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1580 }
1581 return VINF_SUCCESS;
1582 }
1583
1584 AssertMsgFailed(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1585 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1586 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1587}
1588
1589
1590/**
1591 * Selects the switcher to be used for switching to GC.
1592 *
1593 * @returns VBox status code.
1594 * @param pVM VM handle.
1595 * @param enmSwitcher The new switcher.
1596 * @remark This function may be called before the VMM is initialized.
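 *
 * Usage sketch (hypothetical caller; the enum values are assumed to match
 * the s_apSwitchers table above), preferring the PAE switcher and falling
 * back to the plain 32-bit one when it isn't built in:
 * @code
 *     int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_32_TO_PAE);
 *     if (rc == VERR_NOT_IMPLEMENTED)
 *         rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_32_TO_32);
 * @endcode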
1597 */
1598VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1599{
1600 /*
1601 * Validate input.
1602 */
1603 if ( enmSwitcher < VMMSWITCHER_INVALID
1604 || enmSwitcher >= VMMSWITCHER_MAX)
1605 {
1606 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1607 return VERR_INVALID_PARAMETER;
1608 }
1609
1610 /*
1611 * Select the new switcher.
1612 */
1613 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1614 if (pSwitcher)
1615 {
1616 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1617 pVM->vmm.s.enmSwitcher = enmSwitcher;
1618
1619 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1620 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1621
1622 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1623 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1624 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1625 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1626 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1627 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1628 return VINF_SUCCESS;
1629 }
1630 return VERR_NOT_IMPLEMENTED;
1631}
1632
1633/**
1634 * Disable the switcher logic permanently.
1635 *
1636 * @returns VBox status code.
1637 * @param pVM VM handle.
1638 */
1639VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1640{
1641/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1642 * @code
1643 * mov eax, VERR_INTERNAL_ERROR
1644 * ret
1645 * @endcode
1646 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1647 */
1648 pVM->vmm.s.fSwitcherDisabled = true;
1649 return VINF_SUCCESS;
1650}
1651
1652
1653/**
1654 * Resolve a builtin GC symbol.
1655 * Called by PDM when loading or relocating GC modules.
1656 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pszSymbol Symbol to resolve.
1660 * @param pGCPtrValue Where to store the symbol value.
1661 * @remark This has to work before VMMR3Relocate() is called.
1662 */
1663VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1664{
1665 if (!strcmp(pszSymbol, "g_Logger"))
1666 {
1667 if (pVM->vmm.s.pLoggerHC)
1668 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1669 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1670 }
1671 else if (!strcmp(pszSymbol, "g_RelLogger"))
1672 {
1673#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1674 if (pVM->vmm.s.pRelLoggerHC)
1675 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1676 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1677#else
1678 *pGCPtrValue = NIL_RTGCPTR;
1679#endif
1680 }
1681 else
1682 return VERR_SYMBOL_NOT_FOUND;
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/**
 * Suspends the CPU yielder.
1689 *
1690 * @param pVM The VM handle.
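 *
 * A minimal sketch of the intended pairing (the surrounding work is
 * hypothetical):
 * @code
 *     VMMR3YieldSuspend(pVM);    // remembers the remaining period, stops the timer
 *     // ... work during which EMT must not yield ...
 *     VMMR3YieldResume(pVM);     // rearms the timer with the remaining period
 * @endcode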
1691 */
1692VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1693{
1694 if (!pVM->vmm.s.cYieldResumeMillies)
1695 {
1696 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1697 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1698 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1699 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1700 else
1701 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1702 TMTimerStop(pVM->vmm.s.pYieldTimer);
1703 }
1704}
1705
1706
1707/**
 * Stops the CPU yielder.
1709 *
1710 * @param pVM The VM handle.
1711 */
1712VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1713{
1714 if (!pVM->vmm.s.cYieldResumeMillies)
1715 TMTimerStop(pVM->vmm.s.pYieldTimer);
1716 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1717}
1718
1719
1720/**
 * Resumes the CPU yielder after it has been suspended or stopped.
1722 *
1723 * @param pVM The VM handle.
1724 */
1725VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1726{
1727 if (pVM->vmm.s.cYieldResumeMillies)
1728 {
1729 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1730 pVM->vmm.s.cYieldResumeMillies = 0;
1731 }
1732}
1733
1734
1735/**
1736 * Internal timer callback function.
1737 *
1738 * @param pVM The VM.
1739 * @param pTimer The timer handle.
1740 * @param pvUser User argument specified upon timer creation.
1741 */
1742static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1743{
#ifdef LOG_ENABLED
    uint64_t u64StartTS = RTTimeNanoTS();   /* start timestamp, only needed for the log statement */
#endif
    RTThreadYield();
    TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
    Log(("vmmR3YieldEMT: %RI64 ns\n", RTTimeNanoTS() - u64StartTS));
1750}
1751
1752
1753/**
1754 * Acquire global VM lock.
1755 *
1756 * @returns VBox status code
1757 * @param pVM The VM to operate on.
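 *
 * A minimal sketch of the intended pairing (the protected region is
 * hypothetical):
 * @code
 *     int rc = VMMR3Lock(pVM);
 *     AssertRC(rc);
 *     // ... access state guarded by the global VM lock ...
 *     VMMR3Unlock(pVM);
 * @endcode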
1758 */
1759VMMR3DECL(int) VMMR3Lock(PVM pVM)
1760{
1761 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1762}
1763
1764
1765/**
1766 * Release global VM lock.
1767 *
1768 * @returns VBox status code
1769 * @param pVM The VM to operate on.
1770 */
1771VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1772{
1773 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1774}
1775
1776
1777/**
1778 * Return global VM lock owner.
1779 *
1780 * @returns Thread id of owner.
1781 * @returns NIL_RTTHREAD if no owner.
1782 * @param pVM The VM to operate on.
1783 */
1784VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1785{
1786 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1787}
1788
1789
1790/**
1791 * Checks if the current thread is the owner of the global VM lock.
1792 *
1793 * @returns true if owner.
1794 * @returns false if not owner.
1795 * @param pVM The VM to operate on.
1796 */
1797VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1798{
1799 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1800}
1801
1802
1803/**
1804 * Executes guest code.
1805 *
1806 * @param pVM VM handle.
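 *
 * A minimal sketch of a driving loop (in practice EM owns this loop; the
 * status handling shown is illustrative only):
 * @code
 *     int rc = VMMR3RawRunGC(pVM);
 *     // dispatch on rc: VINF_SUCCESS, the VINF_EM_RAW_* statuses, or failure
 * @endcode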
1807 */
1808VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1809{
1810 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1811
1812 /*
1813 * Set the EIP and ESP.
1814 */
1815 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1816 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1817 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1818 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1819
1820 /*
1821 * We hide log flushes (outer) and hypervisor interrupts (inner).
1822 */
1823 for (;;)
1824 {
1825 int rc;
1826 do
1827 {
1828#ifdef NO_SUPCALLR0VMM
1829 rc = VERR_GENERAL_FAILURE;
1830#else
1831 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
1832#endif
1833 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1834
1835 /*
1836 * Flush the logs.
1837 */
1838#ifdef LOG_ENABLED
1839 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1840 if ( pLogger
1841 && pLogger->offScratch > 0)
1842 RTLogFlushGC(NULL, pLogger);
1843#endif
1844#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1845 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1846 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1847 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1848#endif
1849 if (rc != VINF_VMM_CALL_HOST)
1850 {
1851 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1852 return rc;
1853 }
1854 rc = vmmR3ServiceCallHostRequest(pVM);
1855 if (VBOX_FAILURE(rc))
1856 return rc;
1857 /* Resume GC */
1858 }
1859}
1860
1861
1862/**
1863 * Executes guest code (Intel VMX and AMD SVM).
1864 *
1865 * @param pVM VM handle.
1866 */
1867VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1868{
1869 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1870
1871 for (;;)
1872 {
1873 int rc;
1874 do
1875 {
1876#ifdef NO_SUPCALLR0VMM
1877 rc = VERR_GENERAL_FAILURE;
1878#else
1879 rc = SUPCallVMMR0(pVM, VMMR0_HWACC_RUN_GUEST, NULL);
1880#endif
1881 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1882
1883#ifdef LOG_ENABLED
1884 /*
1885 * Flush the log
1886 */
1887 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
1888 if ( pR0Logger
1889 && pR0Logger->Logger.offScratch > 0)
1890 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
#endif /* LOG_ENABLED */
1892 if (rc != VINF_VMM_CALL_HOST)
1893 {
1894 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1895 return rc;
1896 }
1897 rc = vmmR3ServiceCallHostRequest(pVM);
1898 if (VBOX_FAILURE(rc))
1899 return rc;
1900 /* Resume R0 */
1901 }
1902}
1903
1904/**
 * Calls a GC function.
1906 *
1907 * @param pVM The VM handle.
1908 * @param GCPtrEntry The GC function address.
 * @param cArgs The number of arguments in the ellipsis.
1910 * @param ... Arguments to the function.
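 *
 * A minimal usage sketch; the entry point resolution mirrors vmmR3DoGCTest()
 * below, while uArg0 and uArg1 are made-up argument values:
 * @code
 *     RTGCPTR GCPtrEP;
 *     int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
 *     if (VBOX_SUCCESS(rc))
 *         rc = VMMR3CallGC(pVM, GCPtrEP, 2, (RTGCUINTPTR)uArg0, (RTGCUINTPTR)uArg1);
 * @endcode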
1911 */
1912VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
1913{
1914 va_list args;
1915 va_start(args, cArgs);
1916 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
1917 va_end(args);
1918 return rc;
1919}
1920
1921
1922/**
 * Calls a GC function.
1924 *
1925 * @param pVM The VM handle.
1926 * @param GCPtrEntry The GC function address.
 * @param cArgs The number of arguments in the argument list.
1928 * @param args Arguments to the function.
1929 */
1930VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
1931{
1932 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
1933
1934 /*
1935 * Setup the call frame using the trampoline.
1936 */
1937 CPUMHyperSetCtxCore(pVM, NULL);
1938 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
1939 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
1940 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
1941 int i = cArgs;
1942 while (i-- > 0)
1943 *pFrame++ = va_arg(args, RTGCUINTPTR);
1944
1945 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
1946 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
1947 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
1948
1949 /*
1950 * We hide log flushes (outer) and hypervisor interrupts (inner).
1951 */
1952 for (;;)
1953 {
1954 int rc;
1955 do
1956 {
1957#ifdef NO_SUPCALLR0VMM
1958 rc = VERR_GENERAL_FAILURE;
1959#else
1960 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
1961#endif
1962 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1963
1964 /*
1965 * Flush the logs.
1966 */
1967#ifdef LOG_ENABLED
1968 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1969 if ( pLogger
1970 && pLogger->offScratch > 0)
1971 RTLogFlushGC(NULL, pLogger);
1972#endif
1973#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1974 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1975 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1976 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1977#endif
1978 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
1979 VMMR3FatalDump(pVM, rc);
1980 if (rc != VINF_VMM_CALL_HOST)
1981 {
1982 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1983 return rc;
1984 }
1985 rc = vmmR3ServiceCallHostRequest(pVM);
1986 if (VBOX_FAILURE(rc))
1987 return rc;
1988 }
1989}
1990
1991
1992/**
1993 * Resumes executing hypervisor code when interrupted
1994 * by a queue flush or a debug event.
1995 *
1996 * @returns VBox status code.
1997 * @param pVM VM handle.
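 *
 * A minimal sketch of resuming after a hypervisor breakpoint; this is what
 * VMMDoTest() below does:
 * @code
 *     CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
 *     rc = VMMR3ResumeHyper(pVM);
 * @endcode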
1998 */
1999VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2000{
2001 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2002
2003 /*
2004 * We hide log flushes (outer) and hypervisor interrupts (inner).
2005 */
2006 for (;;)
2007 {
2008 int rc;
2009 do
2010 {
2011#ifdef NO_SUPCALLR0VMM
2012 rc = VERR_GENERAL_FAILURE;
2013#else
2014 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
2015#endif
2016 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2017
2018 /*
         * Flush the loggers.
2020 */
2021#ifdef LOG_ENABLED
2022 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2023 if ( pLogger
2024 && pLogger->offScratch > 0)
2025 RTLogFlushGC(NULL, pLogger);
2026#endif
2027#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2028 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2029 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2030 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2031#endif
2032 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2033 VMMR3FatalDump(pVM, rc);
2034 if (rc != VINF_VMM_CALL_HOST)
2035 {
2036 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2037 return rc;
2038 }
2039 rc = vmmR3ServiceCallHostRequest(pVM);
2040 if (VBOX_FAILURE(rc))
2041 return rc;
2042 }
2043}
2044
2045
2046/**
2047 * Service a call to the ring-3 host code.
2048 *
2049 * @returns VBox status code.
2050 * @param pVM VM handle.
2051 * @remark Careful with critsects.
2052 */
2053static int vmmR3ServiceCallHostRequest(PVM pVM)
2054{
2055 switch (pVM->vmm.s.enmCallHostOperation)
2056 {
2057 /*
2058 * Acquire the PDM lock.
2059 */
2060 case VMMCALLHOST_PDM_LOCK:
2061 {
2062 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2063 break;
2064 }
2065
2066 /*
2067 * Flush a PDM queue.
2068 */
2069 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2070 {
2071 PDMR3QueueFlushWorker(pVM, NULL);
2072 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2073 break;
2074 }
2075
2076 /*
2077 * Grow the PGM pool.
2078 */
2079 case VMMCALLHOST_PGM_POOL_GROW:
2080 {
2081 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2082 break;
2083 }
2084
2085 /*
2086 * Acquire the PGM lock.
2087 */
2088 case VMMCALLHOST_PGM_LOCK:
2089 {
2090 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2091 break;
2092 }
2093
2094 /*
2095 * Flush REM handler notifications.
2096 */
2097 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2098 {
2099 REMR3ReplayHandlerNotifications(pVM);
2100 break;
2101 }
2102
2103 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2104 {
2105 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
2106 break;
2107 }
2108
2109 /*
2110 * This is a noop. We just take this route to avoid unnecessary
2111 * tests in the loops.
2112 */
2113 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2114 break;
2115
2116 /*
2117 * Set the VM error message.
2118 */
2119 case VMMCALLHOST_VM_SET_ERROR:
2120 VMR3SetErrorWorker(pVM);
2121 break;
2122
2123 default:
2124 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2125 return VERR_INTERNAL_ERROR;
2126 }
2127
2128 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2129 return VINF_SUCCESS;
2130}
2131
2132
2133
2134/**
2135 * Structure to pass to DBGFR3Info() and for doing all other
2136 * output during fatal dump.
2137 */
2138typedef struct VMMR3FATALDUMPINFOHLP
2139{
2140 /** The helper core. */
2141 DBGFINFOHLP Core;
2142 /** The release logger instance. */
2143 PRTLOGGER pRelLogger;
2144 /** The saved release logger flags. */
2145 RTUINT fRelLoggerFlags;
2146 /** The logger instance. */
2147 PRTLOGGER pLogger;
2148 /** The saved logger flags. */
2149 RTUINT fLoggerFlags;
2150 /** The saved logger destination flags. */
2151 RTUINT fLoggerDestFlags;
2152 /** Whether to output to stderr or not. */
2153 bool fStdErr;
2154} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2155typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2156
2157
2158/**
2159 * Print formatted string.
2160 *
2161 * @param pHlp Pointer to this structure.
2162 * @param pszFormat The format string.
2163 * @param ... Arguments.
2164 */
2165static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2166{
2167 va_list args;
2168 va_start(args, pszFormat);
2169 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2170 va_end(args);
2171}
2172
2173
2174/**
2175 * Print formatted string.
2176 *
2177 * @param pHlp Pointer to this structure.
2178 * @param pszFormat The format string.
2179 * @param args Argument list.
2180 */
2181static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2182{
2183 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2184
2185 if (pMyHlp->pRelLogger)
2186 {
2187 va_list args2;
2188 va_copy(args2, args);
2189 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2190 va_end(args2);
2191 }
2192 if (pMyHlp->pLogger)
2193 {
2194 va_list args2;
2195 va_copy(args2, args);
        RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2);
2197 va_end(args2);
2198 }
2199 if (pMyHlp->fStdErr)
2200 {
2201 va_list args2;
2202 va_copy(args2, args);
        RTStrmPrintfV(g_pStdErr, pszFormat, args2);
2204 va_end(args2);
2205 }
2206}
2207
2208
2209/**
2210 * Initializes the fatal dump output helper.
2211 *
2212 * @param pHlp The structure to initialize.
2213 */
2214static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2215{
2216 memset(pHlp, 0, sizeof(*pHlp));
2217
2218 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2219 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2220
2221 /*
2222 * The loggers.
2223 */
2224 pHlp->pRelLogger = RTLogRelDefaultInstance();
2225#ifndef LOG_ENABLED
2226 if (!pHlp->pRelLogger)
2227#endif
2228 pHlp->pLogger = RTLogDefaultInstance();
2229
2230 if (pHlp->pRelLogger)
2231 {
2232 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2233 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2234 }
2235
2236 if (pHlp->pLogger)
2237 {
2238 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2239 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2240 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2241 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2242 }
2243
2244 /*
     * Check if we need to write to stderr.
2246 */
2247 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2248 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2249}
2250
2251
2252/**
2253 * Deletes the fatal dump output helper.
2254 *
2255 * @param pHlp The structure to delete.
2256 */
2257static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2258{
2259 if (pHlp->pRelLogger)
2260 {
2261 RTLogFlush(pHlp->pRelLogger);
2262 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2263 }
2264
2265 if (pHlp->pLogger)
2266 {
2267 RTLogFlush(pHlp->pLogger);
2268 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2269 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2270 }
2271}
2272
2273
2274/**
2275 * Dumps the VM state on a fatal error.
2276 *
2277 * @param pVM VM Handle.
2278 * @param rcErr VBox status code.
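 *
 * A minimal sketch of a typical call site; it mirrors the TRPM panic
 * handling in VMMR3CallGCV() above:
 * @code
 *     if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
 *         VMMR3FatalDump(pVM, rc);
 * @endcode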
2279 */
2280VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2281{
2282 /*
2283 * Create our output helper and sync it with the log settings.
2284 * This helper will be used for all the output.
2285 */
2286 VMMR3FATALDUMPINFOHLP Hlp;
2287 PCDBGFINFOHLP pHlp = &Hlp.Core;
2288 vmmR3FatalDumpInfoHlpInit(&Hlp);
2289
2290 /*
2291 * Header.
2292 */
2293 pHlp->pfnPrintf(pHlp,
2294 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2295 "!!\n"
2296 "!! Guru Meditation %d (%Vrc)\n"
2297 "!!\n",
2298 rcErr, rcErr);
2299
2300 /*
2301 * Continue according to context.
2302 */
2303 bool fDoneHyper = false;
2304 switch (rcErr)
2305 {
2306 /*
         * Hypervisor errors.
2308 */
2309 case VINF_EM_DBG_HYPER_ASSERTION:
2310 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2311 /* fall thru */
2312 case VERR_TRPM_DONT_PANIC:
2313 case VERR_TRPM_PANIC:
2314 case VINF_EM_RAW_STALE_SELECTOR:
2315 case VINF_EM_RAW_IRET_TRAP:
2316 case VINF_EM_DBG_HYPER_BREAKPOINT:
2317 case VINF_EM_DBG_HYPER_STEPPED:
2318 {
2319 /* Trap? */
2320 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2321 bool fSoftwareInterrupt = false;
2322 uint8_t u8TrapNo = 0xce;
2323 RTGCUINT uErrorCode = 0xdeadface;
2324 RTGCUINTPTR uCR2 = 0xdeadface;
2325 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &fSoftwareInterrupt, &uErrorCode, &uCR2);
2326 if (VBOX_SUCCESS(rc2))
2327 pHlp->pfnPrintf(pHlp,
2328 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv fSoft=%d\n",
2329 u8TrapNo, uErrorCode, uCR2, uEIP, fSoftwareInterrupt);
2330 else
2331 pHlp->pfnPrintf(pHlp,
2332 "!! EIP=%VGv NOTRAP\n",
2333 uEIP);
2334
2335 /*
             * Try to figure out where eip is.
2337 */
2338 /** @todo make query call for core code or move this function to VMM. */
2339 /* core code? */
2340 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2341 // pHlp->pfnPrintf(pHlp,
2342 // "!! EIP is in CoreCode, offset %#x\n",
2343 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2344 //else
2345 { /* ask PDM */
2346 /** @todo ask DBGFR3Sym later. */
2347 char szModName[64];
2348 RTGCPTR GCPtrMod;
2349 char szNearSym1[260];
2350 RTGCPTR GCPtrNearSym1;
2351 char szNearSym2[260];
2352 RTGCPTR GCPtrNearSym2;
2353 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2354 &szModName[0], sizeof(szModName), &GCPtrMod,
2355 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2356 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2357 if (VBOX_SUCCESS(rc))
2358 {
2359 pHlp->pfnPrintf(pHlp,
2360 "!! EIP in %s (%p) at rva %x near symbols:\n"
2361 "!! %VGv rva %VGv off %08x %s\n"
2362 "!! %VGv rva %VGv off -%08x %s\n",
2363 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2364 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2365 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2366 }
2367 else
2368 pHlp->pfnPrintf(pHlp,
2369 "!! EIP is not in any code known to VMM!\n");
2370 }
2371
2372 /* Disassemble the instruction. */
2373 char szInstr[256];
2374 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2375 if (VBOX_SUCCESS(rc2))
2376 pHlp->pfnPrintf(pHlp,
2377 "!! %s\n", szInstr);
2378
2379 /* Dump the hypervisor cpu state. */
2380 pHlp->pfnPrintf(pHlp,
2381 "!!\n"
2382 "!!\n"
2383 "!!\n");
2384 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2385 fDoneHyper = true;
2386
2387 /* Callstack. */
2388 DBGFSTACKFRAME Frame = {0};
2389 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2390 if (VBOX_SUCCESS(rc2))
2391 {
2392 pHlp->pfnPrintf(pHlp,
2393 "!!\n"
2394 "!! Call Stack:\n"
2395 "!!\n"
2396 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2397 do
2398 {
2399 pHlp->pfnPrintf(pHlp,
2400 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2401 (uint32_t)Frame.AddrFrame.off,
2402 (uint32_t)Frame.AddrReturnFrame.off,
2403 (uint32_t)Frame.AddrReturnPC.Sel,
2404 (uint32_t)Frame.AddrReturnPC.off,
2405 Frame.Args.au32[0],
2406 Frame.Args.au32[1],
2407 Frame.Args.au32[2],
2408 Frame.Args.au32[3]);
2409 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2410 if (Frame.pSymPC)
2411 {
2412 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2413 if (offDisp > 0)
2414 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2415 else if (offDisp < 0)
2416 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2417 else
2418 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2419 }
2420 if (Frame.pLinePC)
2421 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2422 pHlp->pfnPrintf(pHlp, "\n");
2423
2424 /* next */
2425 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2426 } while (VBOX_SUCCESS(rc2));
2427 DBGFR3StackWalkEnd(pVM, &Frame);
2428 }
2429
2430 /* raw stack */
2431 pHlp->pfnPrintf(pHlp,
2432 "!!\n"
2433 "!! Raw stack (mind the direction).\n"
2434 "!!\n"
2435 "%.*Vhxd\n",
2436 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2437 break;
2438 }
2439
2440 default:
2441 {
2442 break;
2443 }
2444
2445 } /* switch (rcErr) */
2446
2447
2448 /*
2449 * Dump useful state information.
2450 */
2451 /** @todo convert these dumpers to DBGFR3Info() handlers!!! */
2452 pHlp->pfnPrintf(pHlp,
2453 "!!\n"
2454 "!! PGM Access Handlers & Stuff:\n"
2455 "!!\n");
2456 PGMR3DumpMappings(pVM);
2457
2458
2459 /*
2460 * Generic info dumper loop.
2461 */
2462 static struct
2463 {
2464 const char *pszInfo;
2465 const char *pszArgs;
2466 } const aInfo[] =
2467 {
2468 { "hma", NULL },
2469 { "cpumguest", "verbose" },
2470 { "cpumhyper", "verbose" },
2471 { "cpumhost", "verbose" },
2472 { "mode", "all" },
2473 { "cpuid", "verbose" },
2474 { "gdt", NULL },
2475 { "ldt", NULL },
2476 //{ "tss", NULL },
2477 { "ioport", NULL },
2478 { "mmio", NULL },
2479 { "phys", NULL },
2480 //{ "pgmpd", NULL }, - doesn't always work at init time...
2481 { "timers", NULL },
2482 { "activetimers", NULL },
2483 { "handlers", "phys virt stats" },
2484 { "cfgm", NULL },
2485 };
2486 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2487 {
2488 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2489 continue;
2490 pHlp->pfnPrintf(pHlp,
2491 "!!\n"
2492 "!! {%s, %s}\n"
2493 "!!\n",
2494 aInfo[i].pszInfo, aInfo[i].pszArgs);
2495 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2496 }
2497
2498 /* done */
2499 pHlp->pfnPrintf(pHlp,
2500 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2501
2502
2503 /*
2504 * Delete the output instance (flushing and restoring of flags).
2505 */
2506 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2507}
2508
2509
2510/**
2511 * Performs a testcase.
2512 *
2513 * @returns return value from the test.
2514 * @param pVM The VM handle.
2515 * @param enmTestcase The testcase operation to perform.
2516 * @param uVariation The testcase variation id.
2517 */
2518static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariation)
2519{
2520 RTGCPTR GCPtrEP;
2521 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2522 if (VBOX_FAILURE(rc))
2523 return rc;
2524
2525 CPUMHyperSetCtxCore(pVM, NULL);
    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE);    /* Clear the stack. */
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);      /* Reset the stack pointer. */
2528 CPUMPushHyper(pVM, uVariation);
2529 CPUMPushHyper(pVM, enmTestcase);
2530 CPUMPushHyper(pVM, pVM->pVMGC);
2531 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2532 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2533 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2534 return SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
2535}
2536
2537
2538/**
2539 * Performs a trap test.
2540 *
2541 * @returns Return value from the trap test.
2542 * @param pVM The VM handle.
2543 * @param u8Trap The trap number to test.
2544 * @param uVariation The testcase variation.
2545 * @param rcExpect The expected result.
2546 * @param u32Eax The expected eax value.
2547 * @param pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
2548 * @param pszDesc The test description.
2549 */
2550static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
2551{
    RTPrintf("VMM: testing 0x%x / %d - %s\n", u8Trap, uVariation, pszDesc);
2553
2554 RTGCPTR GCPtrEP;
2555 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2556 if (VBOX_FAILURE(rc))
2557 return rc;
2558
2559 CPUMHyperSetCtxCore(pVM, NULL);
    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE);    /* Clear the stack. */
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);      /* Reset the stack pointer. */
2562 CPUMPushHyper(pVM, uVariation);
2563 CPUMPushHyper(pVM, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
2564 CPUMPushHyper(pVM, pVM->pVMGC);
2565 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2566 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2567 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2568 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
2569 bool fDump = false;
2570 if (rc != rcExpect)
2571 {
2572 RTPrintf("VMM: FAILURE - rc=%Vrc expected %Vrc\n", rc, rcExpect);
2573 if (rc != VERR_NOT_IMPLEMENTED)
2574 fDump = true;
2575 }
2576 else if ( u8Trap != 8 /* double fault doesn't dare setting TrapNo. */
2577 && u8Trap != 3 /* guest only, we're not in guest. */
2578 && u8Trap != 1 /* guest only, we're not in guest. */
2579 && u8Trap != TRPMGetTrapNo(pVM))
2580 {
2581 RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVM), u8Trap);
2582 fDump = true;
2583 }
2584 else if (pszFaultEIP)
2585 {
2586 RTGCPTR GCPtrFault;
2587 int rc2 = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, pszFaultEIP, &GCPtrFault);
2588 if (VBOX_FAILURE(rc2))
            RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Vrc!\n", pszFaultEIP, rc2);
2590 else if (GCPtrFault != CPUMGetHyperEIP(pVM))
2591 {
2592 RTPrintf("VMM: FAILURE - EIP=%VGv expected %VGv (%s)\n", CPUMGetHyperEIP(pVM), GCPtrFault, pszFaultEIP);
2593 fDump = true;
2594 }
2595 }
2596 else
2597 {
        /* Verify that the hypervisor register state matches the expected values. */
        if (CPUMGetHyperSS(pVM) != SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperES(pVM) != SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperDS(pVM) != SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperFS(pVM) != SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperGS(pVM) != SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperEDI(pVM) != 0x01234567)
            RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVM), 0x01234567);
        if (CPUMGetHyperESI(pVM) != 0x42000042)
            RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVM), 0x42000042);
        if (CPUMGetHyperEBP(pVM) != 0xffeeddcc)
            RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVM), 0xffeeddcc);
        if (CPUMGetHyperEBX(pVM) != 0x89abcdef)
            RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVM), 0x89abcdef);
        if (CPUMGetHyperECX(pVM) != 0xffffaaaa)
            RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVM), 0xffffaaaa);
        if (CPUMGetHyperEDX(pVM) != 0x77778888)
            RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVM), 0x77778888);
        if (CPUMGetHyperEAX(pVM) != u32Eax)
            RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVM), u32Eax);
2622 }
2623 if (fDump)
2624 VMMR3FatalDump(pVM, rc);
2625 return rc;
2626}
2627
2628#include <stdio.h>
2629
2630
/**
 * Executes the VMM testcases: traps, debug events, interrupt forwarding
 * and switcher profiling.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 */
2632VMMR3DECL(int) VMMDoTest(PVM pVM)
2633{
2634#if 1
2635#ifdef NO_SUPCALLR0VMM
2636 RTPrintf("NO_SUPCALLR0VMM\n");
2637 return VINF_SUCCESS;
2638#endif
2639
2640 /*
2641 * Setup stack for calling VMMGCEntry().
2642 */
2643 RTGCPTR GCPtrEP;
2644 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2645 if (VBOX_SUCCESS(rc))
2646 {
2647 /*
2648 * Test various crashes which we must be able to recover from.
2649 */
2650 vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
2651 vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");
2652
2653#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
2654 vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
2655 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
2656 bool f;
2657 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
2658#if !defined(DEBUG_bird)
2659 if (VBOX_SUCCESS(rc) && f)
2660#endif
2661 {
            /* see the triple fault warnings in SELM and VMMGC.cpp. */
2663 vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
2664 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
2665 }
2666#endif
2667
2668 vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
        ///@todo find a better \#GP case; on Intel, ltr will \#PF (busy update?) and not \#GP.
2670 //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");
2671
2672 vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
2673 vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
2674
2675 /*
2676 * Set a debug register and perform a context switch.
2677 */
2678 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2679 if (rc != VINF_SUCCESS)
2680 {
2681 RTPrintf("VMM: Nop test failed, rc=%Vrc not VINF_SUCCESS\n", rc);
2682 return rc;
2683 }
2684
2685 /* a harmless breakpoint */
2686 RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
2687 DBGFADDRESS Addr;
2688 DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
2689 RTUINT iBp0;
2690 rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
2691 AssertReleaseRC(rc);
2692 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2693 if (rc != VINF_SUCCESS)
2694 {
2695 RTPrintf("VMM: DR0=0x10000 test failed with rc=%Vrc!\n", rc);
2696 return rc;
2697 }
2698
2699 /* a bad one at VMMGCEntry */
2700 RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
2701 DBGFR3AddrFromFlat(pVM, &Addr, GCPtrEP);
2702 RTUINT iBp1;
2703 rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
2704 AssertReleaseRC(rc);
2705 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2706 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
2707 {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
2709 return rc;
2710 }
2711
2712 /* resume the breakpoint */
2713 RTPrintf("VMM: resuming hyper after breakpoint\n");
2714 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
2715 rc = VMMR3ResumeHyper(pVM);
2716 if (rc != VINF_SUCCESS)
2717 {
2718 RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Vrc\n", rc);
2719 return rc;
2720 }
2721
2722 /* engage the breakpoint again and try single stepping. */
2723 RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n");
2724 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2725 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
2726 {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
2728 return rc;
2729 }
2730
2731 RTGCUINTREG OldPc = CPUMGetHyperEIP(pVM);
2732 RTPrintf("%RGr=>", OldPc);
2733 unsigned i;
2734 for (i = 0; i < 8; i++)
2735 {
2736 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
2737 rc = VMMR3ResumeHyper(pVM);
2738 if (rc != VINF_EM_DBG_HYPER_STEPPED)
2739 {
2740 RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Vrc\n", rc);
2741 return rc;
2742 }
2743 RTGCUINTREG Pc = CPUMGetHyperEIP(pVM);
2744 RTPrintf("%RGr=>", Pc);
2745 if (Pc == OldPc)
2746 {
2747 RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
2748 return VERR_GENERAL_FAILURE;
2749 }
2750 OldPc = Pc;
2751 }
2752 RTPrintf("ok\n");
2753
2754 /* done, clear it */
2755 if ( VBOX_FAILURE(DBGFR3BpClear(pVM, iBp0))
2756 || VBOX_FAILURE(DBGFR3BpClear(pVM, iBp1)))
2757 {
2758 RTPrintf("VMM: Failed to clear breakpoints!\n");
2759 return VERR_GENERAL_FAILURE;
2760 }
2761 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2762 if (rc != VINF_SUCCESS)
2763 {
2764 RTPrintf("VMM: NOP failed, rc=%Vrc\n", rc);
2765 return rc;
2766 }
2767
2768 /*
2769 * Interrupt forwarding.
2770 */
2771 CPUMHyperSetCtxCore(pVM, NULL);
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);   /* Reset the stack pointer. */
2773 CPUMPushHyper(pVM, 0);
2774 CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
2775 CPUMPushHyper(pVM, pVM->pVMGC);
2776 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2777 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2778 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2779 Log(("trampoline=%x\n", pVM->vmm.s.pfnGCCallTrampoline));
2780
2781 /*
     * Switch and run the interrupt forwarding testcase.
2783 */
2784 RTPrintf("VMM: interrupt forwarding...\n");
2785 i = 0;
2786 uint64_t tsBegin = RTTimeNanoTS();
2787 uint64_t TickStart = ASMReadTSC();
2788 do
2789 {
2790 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
2791 if (VBOX_FAILURE(rc))
2792 {
2793 Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i));
2794 VMMR3FatalDump(pVM, rc);
2795 return rc;
2796 }
2797 i++;
2798 if (!(i % 32))
2799 Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
2800 i, CPUMGetHyperESI(pVM), CPUMGetHyperEDI(pVM), CPUMGetHyperEBX(pVM)));
2801 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2802 uint64_t TickEnd = ASMReadTSC();
2803 uint64_t tsEnd = RTTimeNanoTS();
2804
2805 uint64_t Elapsed = tsEnd - tsBegin;
2806 uint64_t PerIteration = Elapsed / (uint64_t)i;
2807 uint64_t cTicksElapsed = TickEnd - TickStart;
2808 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
2809
2810 RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
2811 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
2812 Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
2813 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));
2814
2815 /*
2816 * These forced actions are not necessary for the test and trigger breakpoints too.
2817 */
2818 VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
2819 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
2820
2821 /*
2822 * Profile switching.
2823 */
2824 RTPrintf("VMM: profiling switcher...\n");
2825 Log(("VMM: profiling switcher...\n"));
2826 uint64_t TickMin = ~0;
2827 tsBegin = RTTimeNanoTS();
2828 TickStart = ASMReadTSC();
2829 for (i = 0; i < 1000000; i++)
2830 {
2831 CPUMHyperSetCtxCore(pVM, NULL);
        CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);   /* Reset the stack pointer. */
2833 CPUMPushHyper(pVM, 0);
2834 CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_NOP);
2835 CPUMPushHyper(pVM, pVM->pVMGC);
2836 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2837 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2838 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2839
2840 uint64_t TickThisStart = ASMReadTSC();
2841 rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
2842 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
2843 if (VBOX_FAILURE(rc))
2844 {
2845 Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i));
2846 VMMR3FatalDump(pVM, rc);
2847 return rc;
2848 }
2849 if (TickThisElapsed < TickMin)
2850 TickMin = TickThisElapsed;
2851 }
2852 TickEnd = ASMReadTSC();
2853 tsEnd = RTTimeNanoTS();
2854
2855 Elapsed = tsEnd - tsBegin;
2856 PerIteration = Elapsed / (uint64_t)i;
2857 cTicksElapsed = TickEnd - TickStart;
2858 cTicksPerIteration = cTicksElapsed / (uint64_t)i;
2859
2860 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
2861 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
2862 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
2863 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
2864
2865 rc = VINF_SUCCESS;
2866 }
2867 else
        AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Vrc\n", rc));
2869#endif
2870 return rc;
2871}
2872