VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 7954

Last change on this file since 7954 was 7923, checked in by vboxsync, 17 years ago

Different logging

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 36.6 KB
Line 
1/* $Id: VMMR0.cpp 7923 2008-04-11 15:46:18Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/trpm.h>
26#include <VBox/cpum.h>
27#include <VBox/stam.h>
28#include <VBox/tm.h>
29#include "VMMInternal.h"
30#include <VBox/vm.h>
31#include <VBox/gvmm.h>
32#include <VBox/gmm.h>
33#include <VBox/intnet.h>
34#include <VBox/hwaccm.h>
35#include <VBox/param.h>
36
37#include <VBox/err.h>
38#include <VBox/version.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/stdarg.h>
42#include <iprt/mp.h>
43
44#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
45# pragma intrinsic(_AddressOfReturnAddress)
46#endif
47
48
49/*******************************************************************************
50* Internal Functions *
51*******************************************************************************/
52static int VMMR0Init(PVM pVM, unsigned uVersion);
53static int VMMR0Term(PVM pVM);
54__BEGIN_DECLS
55VMMR0DECL(int) ModuleInit(void);
56VMMR0DECL(void) ModuleTerm(void);
57__END_DECLS
58
59
60/*******************************************************************************
61* Global Variables *
62*******************************************************************************/
63#ifdef VBOX_WITH_INTERNAL_NETWORKING
64/** Pointer to the internal networking service instance. */
65PINTNET g_pIntNet = 0;
66#endif
67
68
69/**
70 * Initialize the module.
71 * This is called when we're first loaded.
72 *
73 * @returns 0 on success.
74 * @returns VBox status on failure.
75 */
76VMMR0DECL(int) ModuleInit(void)
77{
78 LogFlow(("ModuleInit:\n"));
79
80 /*
81 * Initialize the GVMM, GMM.& HWACCM
82 */
83 int rc = GVMMR0Init();
84 if (RT_SUCCESS(rc))
85 {
86 rc = GMMR0Init();
87 if (RT_SUCCESS(rc))
88 {
89 rc = HWACCMR0Init();
90 if (RT_SUCCESS(rc))
91 {
92#ifdef VBOX_WITH_INTERNAL_NETWORKING
93 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
94 g_pIntNet = NULL;
95 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
96 rc = INTNETR0Create(&g_pIntNet);
97 if (VBOX_SUCCESS(rc))
98 {
99 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
100 return VINF_SUCCESS;
101 }
102 g_pIntNet = NULL;
103 LogFlow(("ModuleTerm: returns %Vrc\n", rc));
104#else
105 LogFlow(("ModuleInit: returns success.\n"));
106 return VINF_SUCCESS;
107#endif
108 }
109 }
110 }
111
112 LogFlow(("ModuleInit: failed %Rrc\n", rc));
113 return rc;
114}
115
116
117/**
118 * Terminate the module.
119 * This is called when we're finally unloaded.
120 */
121VMMR0DECL(void) ModuleTerm(void)
122{
123 LogFlow(("ModuleTerm:\n"));
124
125#ifdef VBOX_WITH_INTERNAL_NETWORKING
126 /*
127 * Destroy the internal networking instance.
128 */
129 if (g_pIntNet)
130 {
131 INTNETR0Destroy(g_pIntNet);
132 g_pIntNet = NULL;
133 }
134#endif
135
136 /* Global HWACCM cleanup */
137 HWACCMR0Term();
138
139 /*
140 * Destroy the GMM and GVMM instances.
141 */
142 GMMR0Term();
143 GVMMR0Term();
144
145 LogFlow(("ModuleTerm: returns\n"));
146}
147
148
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uVersion    The minimum module version required.
 * @thread  EMT.
 */
static int VMMR0Init(PVM pVM, unsigned uVersion)
{
    /*
     * Check if compatible version.
     * Exact match always passes; otherwise the major version must match and
     * the minor version must be at least the one we were built with.
     */
    if (    uVersion != VBOX_VERSION
        &&  (   VBOX_GET_VERSION_MAJOR(uVersion) != VBOX_VERSION_MAJOR
             || VBOX_GET_VERSION_MINOR(uVersion) < VBOX_VERSION_MINOR))
        return VERR_VERSION_MISMATCH;
    /* pVM->pVMR0 must point back at itself for a correctly mapped ring-0 VM structure. */
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("VMMR0Init: before %p\n", RTLogDefaultInstance()));
        LogCom(("VMMR0Init: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("VMMR0Init: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("VMMR0Init: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("VMMR0Init: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("VMMR0Init: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("VMMR0Init: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        /* The session pointer is used as the per-thread logger key. */
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM.
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            /*
             * Init CPUM.
             */
            rc = CPUMR0Init(pVM);
            if (RT_SUCCESS(rc))
                return rc;
        }
    }

    /* failed - deregister the per-thread logger again before bailing out. */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
238
239
240/**
241 * Terminates the R0 driver for a particular VM instance.
242 *
243 * @returns VBox status code.
244 *
245 * @param pVM The VM instance in question.
246 * @thread EMT.
247 */
248static int VMMR0Term(PVM pVM)
249{
250 HWACCMR0TermVM(pVM);
251
252 /*
253 * Deregister the logger.
254 */
255 RTLogSetDefaultInstanceThread(NULL, 0);
256 return VINF_SUCCESS;
257}
258
259
260/**
261 * Calls the ring-3 host code.
262 *
263 * @returns VBox status code of the ring-3 call.
264 * @param pVM The VM handle.
265 * @param enmOperation The operation.
266 * @param uArg The argument to the operation.
267 */
268VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
269{
270/** @todo profile this! */
271 pVM->vmm.s.enmCallHostOperation = enmOperation;
272 pVM->vmm.s.u64CallHostArg = uArg;
273 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
274 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
275 if (rc == VINF_SUCCESS)
276 rc = pVM->vmm.s.rcCallHost;
277 return rc;
278}
279
280
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps each interesting GC/ring-0 return code onto its dedicated STAM
 * counter; anything unrecognized lands in StatGCRetMisc.  For
 * VINF_VMM_CALL_HOST the pending call-host operation is inspected to pick
 * the more specific counter.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break down call-host exits by the operation that was requested. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMLock);
                    break;
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRemReplay);
                    break;
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMGrowRAM);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetRuntimeError);
                    break;
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
449
450
451
/**
 * The Ring 0 entry point, called by the interrupt gate.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    switch (enmOperation)
    {
#ifdef VBOX_WITH_IDT_PATCHING
        /*
         * Switch to GC.
         * These calls return whatever the GC returns.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            register int rc;
            /* World switch to the guest context; the result is also cached in iLastGCRc. */
            pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif

            /*
             * We'll let TRPM change the stack frame so our return is different.
             * Just keep in mind that after the call, things have changed!
             */
            if (    rc == VINF_EM_RAW_INTERRUPT
                ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
            {
                /*
                 * Don't trust the compiler to get this right.
                 * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
                 * mode too because we push the arguments on the stack in the IDT patch code.
                 */
# if defined(__GNUC__)
                void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
# elif defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
                void *pvRet = (uint8_t *)_AddressOfReturnAddress();
# elif defined(RT_ARCH_X86)
                void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
# else
#  error "huh?"
# endif
                /* Sanity check: the three on-stack arguments must follow the
                   return address before we hand the frame to TRPM. */
                if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
                    &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)enmOperation
                    &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
                    TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
                else
                {
# if defined(DEBUG) || defined(LOG_ENABLED)
                    /* Warn once (per load) when the frame cannot be located. */
                    static bool s_fHaveWarned = false;
                    if (!s_fHaveWarned)
                    {
                        s_fHaveWarned = true;
                        RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                        RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                    }
# endif
                    /* Fall back to dispatching the host interrupt directly. */
                    TRPMR0DispatchHostInterrupt(pVM);
                }
            }
            return rc;
        }

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;
#endif /* VBOX_WITH_IDT_PATCHING */

        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
560
561
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryFast(PVM pVM, VMMR0OPERATION enmOperation)
{
    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

                /* World switch; cache the result for ring-3. */
                int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
                pVM->vmm.s.iLastGCRc = rc;

                /* An interrupt caused the exit - forward it to the host before re-enabling. */
                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
                vmmR0RecordRC(pVM, rc);
#endif
                return rc;
            }

            Assert(!pVM->vmm.s.fSwitcherDisabled);
            return VERR_NOT_SUPPORTED;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away we page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);

#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            int rc = HWACCMR0Enter(pVM);
            if (VBOX_SUCCESS(rc))
            {
                /* Run inside a setjmp so ring-3 calls can long jump out safely. */
                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
                int rc2 = HWACCMR0Leave(pVM);
                AssertRC(rc2);
            }
            pVM->vmm.s.iLastGCRc = rc;
#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            return rc;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
652
653
654/**
655 * VMMR0EntryEx worker function, either called directly or when ever possible
656 * called thru a longjmp so we can exit safely on failure.
657 *
658 * @returns VBox status code.
659 * @param pVM The VM to operate on.
660 * @param enmOperation Which operation to execute.
661 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
662 * @param u64Arg Some simple constant argument.
663 * @remarks Assume called with interrupts _enabled_.
664 */
665static int vmmR0EntryExWorker(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg)
666{
667 /*
668 * Common VM pointer validation.
669 */
670 if (pVM)
671 {
672 if (RT_UNLIKELY( !VALID_PTR(pVM)
673 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
674 {
675 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
676 return VERR_INVALID_POINTER;
677 }
678 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
679 || pVM->enmVMState > VMSTATE_TERMINATED
680 || pVM->pVMR0 != pVM))
681 {
682 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
683 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
684 return VERR_INVALID_POINTER;
685 }
686 }
687
688 switch (enmOperation)
689 {
690 /*
691 * GVM requests
692 */
693 case VMMR0_DO_GVMM_CREATE_VM:
694 if (pVM || u64Arg)
695 return VERR_INVALID_PARAMETER;
696 SUPR0Printf("-> GVMMR0CreateVMReq\n");
697 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
698
699 case VMMR0_DO_GVMM_DESTROY_VM:
700 if (pReqHdr || u64Arg)
701 return VERR_INVALID_PARAMETER;
702 return GVMMR0DestroyVM(pVM);
703
704 case VMMR0_DO_GVMM_SCHED_HALT:
705 if (pReqHdr)
706 return VERR_INVALID_PARAMETER;
707 return GVMMR0SchedHalt(pVM, u64Arg);
708
709 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
710 if (pReqHdr || u64Arg)
711 return VERR_INVALID_PARAMETER;
712 return GVMMR0SchedWakeUp(pVM);
713
714 case VMMR0_DO_GVMM_SCHED_POLL:
715 if (pReqHdr || u64Arg > 1)
716 return VERR_INVALID_PARAMETER;
717 return GVMMR0SchedPoll(pVM, (bool)u64Arg);
718
719 case VMMR0_DO_GVMM_QUERY_STATISTICS:
720 if (u64Arg)
721 return VERR_INVALID_PARAMETER;
722 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
723
724 case VMMR0_DO_GVMM_RESET_STATISTICS:
725 if (u64Arg)
726 return VERR_INVALID_PARAMETER;
727 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
728
729 /*
730 * Initialize the R0 part of a VM instance.
731 */
732 case VMMR0_DO_VMMR0_INIT:
733 return VMMR0Init(pVM, (unsigned)u64Arg);
734
735 /*
736 * Terminate the R0 part of a VM instance.
737 */
738 case VMMR0_DO_VMMR0_TERM:
739 return VMMR0Term(pVM);
740
741 /*
742 * Attempt to enable hwacc mode and check the current setting.
743 *
744 */
745 case VMMR0_DO_HWACC_ENABLE:
746 return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);
747
748 /*
749 * Setup the hardware accelerated raw-mode session.
750 */
751 case VMMR0_DO_HWACC_SETUP_VM:
752 {
753 RTCCUINTREG fFlags = ASMIntDisableFlags();
754 int rc = HWACCMR0SetupVM(pVM);
755 ASMSetFlags(fFlags);
756 return rc;
757 }
758
759 /*
760 * Switch to GC to execute Hypervisor function.
761 */
762 case VMMR0_DO_CALL_HYPERVISOR:
763 {
764 /* Safety precaution as HWACCM can disable the switcher. */
765 Assert(!pVM->vmm.s.fSwitcherDisabled);
766 if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
767 return VERR_NOT_SUPPORTED;
768
769 RTCCUINTREG fFlags = ASMIntDisableFlags();
770 int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
771 /** @todo dispatch interrupts? */
772 ASMSetFlags(fFlags);
773 return rc;
774 }
775
776 /*
777 * PGM wrappers.
778 */
779 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
780 return PGMR0PhysAllocateHandyPages(pVM);
781
782 /*
783 * GMM wrappers.
784 */
785 case VMMR0_DO_GMM_INITIAL_RESERVATION:
786 if (u64Arg)
787 return VERR_INVALID_PARAMETER;
788 return GMMR0InitialReservationReq(pVM, (PGMMINITIALRESERVATIONREQ)pReqHdr);
789 case VMMR0_DO_GMM_UPDATE_RESERVATION:
790 if (u64Arg)
791 return VERR_INVALID_PARAMETER;
792 return GMMR0UpdateReservationReq(pVM, (PGMMUPDATERESERVATIONREQ)pReqHdr);
793
794 case VMMR0_DO_GMM_ALLOCATE_PAGES:
795 if (u64Arg)
796 return VERR_INVALID_PARAMETER;
797 return GMMR0AllocatePagesReq(pVM, (PGMMALLOCATEPAGESREQ)pReqHdr);
798 case VMMR0_DO_GMM_FREE_PAGES:
799 if (u64Arg)
800 return VERR_INVALID_PARAMETER;
801 return GMMR0FreePagesReq(pVM, (PGMMFREEPAGESREQ)pReqHdr);
802 case VMMR0_DO_GMM_BALLOONED_PAGES:
803 if (u64Arg)
804 return VERR_INVALID_PARAMETER;
805 return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
806 case VMMR0_DO_GMM_DEFLATED_BALLOON:
807 if (pReqHdr)
808 return VERR_INVALID_PARAMETER;
809 return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);
810
811 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
812 if (u64Arg)
813 return VERR_INVALID_PARAMETER;
814 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
815 case VMMR0_DO_GMM_SEED_CHUNK:
816 if (pReqHdr)
817 return VERR_INVALID_PARAMETER;
818 return GMMR0SeedChunk(pVM, (RTR3PTR)u64Arg);
819
820 /*
821 * A quick GCFGM mock-up.
822 */
823 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
824 case VMMR0_DO_GCFGM_SET_VALUE:
825 case VMMR0_DO_GCFGM_QUERY_VALUE:
826 {
827 if (pVM || !pReqHdr || u64Arg)
828 return VERR_INVALID_PARAMETER;
829 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
830 if (pReq->Hdr.cbReq != sizeof(*pReq))
831 return VERR_INVALID_PARAMETER;
832 int rc;
833 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
834 {
835 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
836 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
837 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
838 }
839 else
840 {
841 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
842 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
843 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
844 }
845 return rc;
846 }
847
848
849#ifdef VBOX_WITH_INTERNAL_NETWORKING
850 /*
851 * Requests to the internal networking service.
852 */
853 case VMMR0_DO_INTNET_OPEN:
854 if (!pVM || u64Arg)
855 return VERR_INVALID_PARAMETER;
856 if (!g_pIntNet)
857 return VERR_NOT_SUPPORTED;
858 return INTNETR0OpenReq(g_pIntNet, pVM->pSession, (PINTNETOPENREQ)pReqHdr);
859
860 case VMMR0_DO_INTNET_IF_CLOSE:
861 if (!pVM || u64Arg)
862 return VERR_INVALID_PARAMETER;
863 if (!g_pIntNet)
864 return VERR_NOT_SUPPORTED;
865 return INTNETR0IfCloseReq(g_pIntNet, (PINTNETIFCLOSEREQ)pReqHdr);
866
867 case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
868 if (!pVM || u64Arg)
869 return VERR_INVALID_PARAMETER;
870 if (!g_pIntNet)
871 return VERR_NOT_SUPPORTED;
872 return INTNETR0IfGetRing3BufferReq(g_pIntNet, (PINTNETIFGETRING3BUFFERREQ)pReqHdr);
873
874 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
875 if (!pVM || u64Arg)
876 return VERR_INVALID_PARAMETER;
877 if (!g_pIntNet)
878 return VERR_NOT_SUPPORTED;
879 return INTNETR0IfSetPromiscuousModeReq(g_pIntNet, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
880
881 case VMMR0_DO_INTNET_IF_SEND:
882 if (!pVM || u64Arg)
883 return VERR_INVALID_PARAMETER;
884 if (!g_pIntNet)
885 return VERR_NOT_SUPPORTED;
886 return INTNETR0IfSendReq(g_pIntNet, (PINTNETIFSENDREQ)pReqHdr);
887
888 case VMMR0_DO_INTNET_IF_WAIT:
889 if (!pVM || u64Arg)
890 return VERR_INVALID_PARAMETER;
891 if (!g_pIntNet)
892 return VERR_NOT_SUPPORTED;
893 return INTNETR0IfWaitReq(g_pIntNet, (PINTNETIFWAITREQ)pReqHdr);
894#endif /* VBOX_WITH_INTERNAL_NETWORKING */
895
896 /*
897 * For profiling.
898 */
899 case VMMR0_DO_NOP:
900 return VINF_SUCCESS;
901
902 /*
903 * For testing Ring-0 APIs invoked in this environment.
904 */
905 case VMMR0_DO_TESTS:
906 /** @todo make new test */
907 return VINF_SUCCESS;
908
909
910 default:
911 /*
912 * We're returning VERR_NOT_SUPPORT here so we've got something else
913 * than -1 which the interrupt gate glue code might return.
914 */
915 Log(("operation %#x is not supported\n", enmOperation));
916 return VERR_NOT_SUPPORTED;
917 }
918}
919
920
/**
 * Argument package for vmmR0EntryExWrapper, containing the arguments for
 * VMMR0EntryEx so they survive the setjmp call.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM             pVM;            /**< The VM handle. */
    VMMR0OPERATION  enmOperation;   /**< The operation to execute. */
    PSUPVMMR0REQHDR pReq;           /**< Optional request packet. */
    uint64_t        u64Arg;         /**< Simple constant argument. */
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
933
934/**
935 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
936 *
937 * @returns VBox status code.
938 * @param pvArgs The argument package
939 */
940static int vmmR0EntryExWrapper(void *pvArgs)
941{
942 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
943 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
944 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
945 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg);
946}
947
948
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0)
    {
        switch (enmOperation)
        {
            /* These operations must run inside the jump buffer so a failed
               assertion can long jump back out instead of panicking ring-0. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            case VMMR0_DO_GMM_DEFLATED_BALLOON:
            case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            case VMMR0_DO_GMM_SEED_CHUNK:
            {
                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    /* Everything else (or no valid VM) goes straight to the worker. */
    return vmmR0EntryExWorker(pVM, enmOperation, pReq, u64Arg);
}
996
997
998
/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     * The logger is embedded in a VMMR0LOGGER, so step back to the
     * containing structure and validate everything before trusting it.
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  !VALID_PTR(pLogger)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
#endif
        return;
    }

    /* The VM handle must be valid and self-referencing. */
    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
#endif
        return;
    }

    /*
     * Check that the jump buffer is armed.
     * Without it VMMR0CallHost cannot long jump to ring-3; drop the
     * buffered output instead (offScratch = 0).
     */
#ifdef RT_ARCH_X86
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
#endif
        pLogger->offScratch = 0;
        return;
    }
    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
1050
1051
1052
/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 * @remark  The RTDECL() makes this a bit difficult to override on windows. Sorry.
 */
DECLEXPORT(bool) RTCALL RTAssertDoBreakpoint(void)
{
    /* Look up the VM associated with the calling EMT, if any. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
#ifdef RT_ARCH_X86
        if (pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
        if (pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
        {
            /* Jump buffer armed: hand the assertion to ring-3 and only hit
               the breakpoint if that call fails. */
            int rc = VMMR0CallHost(pVM, VMMCALLHOST_VM_R0_HYPER_ASSERTION, 0);
            return RT_FAILURE_NP(rc);
        }
    }
    /* Not an armed EMT: platform-dependent default for hitting the breakpoint. */
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
}
1080
1081
1082
1083# undef LOG_GROUP
1084# define LOG_GROUP LOG_GROUP_EM
1085
1086/**
1087 * Override this so we can push
1088 *
1089 * @param pszExpr Expression. Can be NULL.
1090 * @param uLine Location line number.
1091 * @param pszFile Location file name.
1092 * @param pszFunction Location function name.
1093 * @remark This API exists in HC Ring-3 and GC.
1094 */
1095DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
1096{
1097 SUPR0Printf("\n!!R0-Assertion Failed!!\n"
1098 "Expression: %s\n"
1099 "Location : %s(%d) %s\n",
1100 pszExpr, pszFile, uLine, pszFunction);
1101
1102 LogRel(("\n!!R0-Assertion Failed!!\n"
1103 "Expression: %s\n"
1104 "Location : %s(%d) %s\n",
1105 pszExpr, pszFile, uLine, pszFunction));
1106}
1107
1108
1109/**
1110 * Callback for RTLogFormatV which writes to the com port.
1111 * See PFNLOGOUTPUT() for details.
1112 */
1113static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1114{
1115 for (size_t i = 0; i < cbChars; i++)
1116 {
1117 LogRel(("%c", pachChars[i])); /** @todo this isn't any release logging in ring-0 from what I can tell... */
1118 SUPR0Printf("%c", pachChars[i]);
1119 }
1120
1121 return cbChars;
1122}
1123
1124
1125DECLEXPORT(void) RTCALL AssertMsg2(const char *pszFormat, ...)
1126{
1127 PRTLOGGER pLog = RTLogDefaultInstance();
1128 if (pLog)
1129 {
1130 va_list args;
1131
1132 va_start(args, pszFormat);
1133 RTLogFormatV(rtLogOutput, pLog, pszFormat, args);
1134 va_end(args);
1135 }
1136}
1137
1138
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette