VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@39333

Last change on this file since 39333 was 39303, checked in by vboxsync, 13 years ago

VMCPUSET changes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 53.6 KB
/* $Id: VMMR0.cpp 39303 2011-11-15 10:55:12Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#ifdef VBOX_WITH_PCI_PASSTHROUGH
# include <VBox/vmm/pdmpci.h>
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);

#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif
    NULL
};

#ifdef RT_OS_SOLARIS
/* Dependency information for the native solaris loader. */
extern "C" { char _depends_on[] = "vboxdrv"; }
#endif



/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the VMM, GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = vmmInitFormatTypes();
    if (RT_SUCCESS(rc))
    {
        rc = GVMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = GMMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = HWACCMR0Init();
                if (RT_SUCCESS(rc))
                {
                    rc = PGMRegisterStringFormatTypes();
                    if (RT_SUCCESS(rc))
                    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        rc = PGMR0DynMapInit();
#endif
                        if (RT_SUCCESS(rc))
                        {
                            rc = IntNetR0Init();
                            if (RT_SUCCESS(rc))
                            {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                rc = PciRawR0Init();
#endif
                                if (RT_SUCCESS(rc))
                                {
                                    rc = CPUMR0ModuleInit();
                                    if (RT_SUCCESS(rc))
                                    {
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        rc = vmmR0TripleFaultHackInit();
                                        if (RT_SUCCESS(rc))
#endif
                                        {
                                            LogFlow(("ModuleInit: returns success.\n"));
                                            return VINF_SUCCESS;
                                        }

                                        /*
                                         * Bail out.
                                         */
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        vmmR0TripleFaultHackTerm();
#endif
                                    }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                    PciRawR0Term();
#endif
                                }
                                IntNetR0Term();
                            }
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                            PGMR0DynMapTerm();
#endif
                        }
                        PGMDeregisterStringFormatTypes();
                    }
                    HWACCMR0Term();
                }
                GMMR0Term();
            }
            GVMMR0Term();
        }
        vmmTermFormatTypes();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HWACCM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    vmmR0TripleFaultHackTerm();
#endif

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    vmmTermFormatTypes();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        GVMMR0DoneInitVM(pVM);
                        return rc;
                    }
                }

                /* bail out */
            }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
            PciRawR0TermVM(pVM);
#endif
            HWACCMR0TermVM(pVM);
        }
    }


    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
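
/*
 * A minimal ring-3 usage sketch (an illustration added here, not part of the
 * original file; the function name is hypothetical): vmmR0InitVM above is
 * reached via the VMMR0_DO_VMMR0_INIT operation, with u64Arg carrying the
 * ring-3 SVN revision that is checked against VMMGetSvnRev().  Assumes the
 * SUPR3CallVMMR0Ex entry point declared in VBox/sup.h.
 */
#if 0
static int vmmR3InitR0Sketch(PVM pVM)
{
    /* Must be called on the EMT of VCPU 0. */
    return SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_VMMR0_INIT,
                            VMMGetSvnRev() /* u64Arg */, NULL /* pReqHdr */);
}
#endif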


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif

    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}
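
/*
 * Ring-3 reaches VMMR0TermVM through the VMMR0_DO_VMMR0_TERM operation (see
 * vmmR0EntryExWorker below).  A minimal sketch of that call, assuming the
 * SUPR3CallVMMR0Ex entry point from VBox/sup.h (the function name here is
 * hypothetical):
 */
#if 0
static int vmmR3TermR0Sketch(PVM pVM)
{
    return SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_VMMR0_TERM,
                            0 /* u64Arg */, NULL /* pReqHdr */);
}
#endif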


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something other
     * than -1, which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 * @remarks The return code is stored in pVCpu->vmm.s.iLastGZRc rather than
 *          returned directly.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1               /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
                CPUMR0SetLApic(pVM, idHostCpu);
#endif
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting.  On Windows we
         * avoid this by having the support driver raise the IRQL before calling
         * us; this way we hope to get away with page faults and later calling
         * into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (   pR0Logger
                    && !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
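
/*
 * A minimal ring-3 usage sketch (an illustration, not part of the original
 * file; the function name is hypothetical): since VMMR0EntryFast returns no
 * status, the caller reads it from pVCpu->vmm.s.iLastGZRc afterwards.
 * SUPR3CallVMMR0Fast is assumed to be the fast-ioctl wrapper from VBox/sup.h.
 */
#if 0
static int vmmR3HwAccRunSketch(PVM pVM, PVMCPU pVCpu)
{
    int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
    if (RT_SUCCESS(rc))
        rc = pVCpu->vmm.s.iLastGZRc; /* status stored by VMMR0EntryFast above */
    return rc;
}
#endif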


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM                 The VM argument.
 * @param   pClaimedSession     The session claimed by the request.
 * @param   pSession            The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
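
/*
 * Worked examples for the checks above (illustrative, not part of the
 * original file):
 *   vmmR0IsValidSession(NULL, pSession, pSession)      -> true   (plain session request)
 *   vmmR0IsValidSession(pVM,  NULL,     pVM->pSession) -> true   (VM request, session matches)
 *   vmmR0IsValidSession(pVM,  pSession, pSession)      -> false  (both a VM and a claimed session)
 *   vmmR0IsValidSession(pVM,  NULL,     pOther)        -> false  (session mismatch)
 */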


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(   !VALID_PTR(pVM)
                        || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(   pVM->enmVMState < VMSTATE_CREATING
                        || pVM->enmVMState > VMSTATE_TERMINATED
                        || pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVM(pVM);

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            RTCPUID idHostCpu = RTMpCpuId();
            CPUMR0SetLApic(pVM, idHostCpu);
#endif

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
            if (idCpu != 0)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysSetupIommu(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /** @todo this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(    rc == VINF_SUCCESS
                       ||  (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

#ifdef VBOX_WITH_PCI_PASSTHROUGH
        /*
         * Requests to host PCI driver service.
         */
        case VMMR0_DO_PCIRAW_REQ:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
#endif
        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something other
             * than -1, which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}

/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (   VALID_PTR(pVM)
        && pVM->pVMR0
        && idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
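
/*
 * Worked example for the prefix above: for idCpu 31 (0x1f) the two characters
 * written are '1' and 'f', so every line logged by that VCPU starts with "1f".
 */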

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */
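
/*
 * A minimal usage sketch (an illustration, not part of the original file;
 * the function name is hypothetical): bracketing a region in which a log
 * flush must not longjmp back to ring-3, so vmmR0LoggerFlush returns quietly.
 */
#if 0
static void vmmR0NoFlushSectionSketch(PVMCPU pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);
    /* ... ring-0 code during which flushing must stay disabled ... */
    VMMR0LogFlushEnable(pVCpu);
}
#endif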

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    NOREF(pv);
    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}