VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMMAll.cpp@ 58126

Last change on this file since 58126 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 12.9 KB
Line 
1/* $Id: VMMAll.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * VMM All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include "VMMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/hm.h>
27#include <VBox/vmm/vmcpuset.h>
28#include <VBox/param.h>
29#include <iprt/thread.h>
30#include <iprt/mp.h>
31
32
33/*********************************************************************************************************************************
34* Global Variables *
35*********************************************************************************************************************************/
/** User counter for the vmmInitFormatTypes function (pro forma).
 * Incremented by vmmInitFormatTypes and decremented by vmmTermFormatTypes;
 * the format type is registered on the 0->1 transition and deregistered on
 * the 1->0 transition. */
static volatile uint32_t g_cFormatTypeUsers = 0;
38
39
/**
 * Helper that formats a decimal number in the range 0..9999.
 *
 * Leading zeros are suppressed and the buffer is always zero terminated.
 *
 * @returns The length of the formatted number (excluding the terminator).
 * @param   pszBuf      Output buffer with sufficient space (at least 5 bytes).
 * @param   uNumber     The number to format.
 */
static unsigned vmmFormatTypeShortNumber(char *pszBuf, uint32_t uNumber)
{
    unsigned cchNum = 0;
    /* Emit the thousands, hundreds and tens digits only once the number is
       large enough to need them (no leading zeros). */
    for (uint32_t uDiv = 1000; uDiv >= 10; uDiv /= 10)
        if (uNumber >= uDiv)
            pszBuf[cchNum++] = (char)('0' + (uNumber / uDiv) % 10);
    /* The units digit is always present. */
    pszBuf[cchNum++] = (char)('0' + uNumber % 10);
    pszBuf[cchNum]   = '\0';
    return cchNum;
}
64
65
66/**
67 * @callback_method_impl{FNRTSTRFORMATTYPE, vmsetcpu}
68 */
69static DECLCALLBACK(size_t) vmmFormatTypeVmCpuSet(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
70 const char *pszType, void const *pvValue,
71 int cchWidth, int cchPrecision, unsigned fFlags,
72 void *pvUser)
73{
74 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(fFlags);
75
76 PCVMCPUSET pSet = (PCVMCPUSET)pvValue;
77 uint32_t cCpus = 0;
78 uint32_t iCpu = RT_ELEMENTS(pSet->au32Bitmap) * 32;
79 while (iCpu--)
80 if (VMCPUSET_IS_PRESENT(pSet, iCpu))
81 cCpus++;
82
83 char szTmp[32];
84 AssertCompile(RT_ELEMENTS(pSet->au32Bitmap) * 32 < 999);
85 if (cCpus == 1)
86 {
87 iCpu = RT_ELEMENTS(pSet->au32Bitmap) * 32;
88 while (iCpu--)
89 if (VMCPUSET_IS_PRESENT(pSet, iCpu))
90 {
91 szTmp[0] = 'c';
92 szTmp[1] = 'p';
93 szTmp[2] = 'u';
94 return pfnOutput(pvArgOutput, szTmp, 3 + vmmFormatTypeShortNumber(&szTmp[3], iCpu));
95 }
96 cCpus = 0;
97 }
98 if (cCpus == 0)
99 return pfnOutput(pvArgOutput, RT_STR_TUPLE("<empty>"));
100 if (cCpus == RT_ELEMENTS(pSet->au32Bitmap) * 32)
101 return pfnOutput(pvArgOutput, RT_STR_TUPLE("<full>"));
102
103 /*
104 * Print cpus that are present: {1,2,7,9 ... }
105 */
106 size_t cchRet = pfnOutput(pvArgOutput, "{", 1);
107
108 cCpus = 0;
109 iCpu = 0;
110 while (iCpu < RT_ELEMENTS(pSet->au32Bitmap) * 32)
111 {
112 if (VMCPUSET_IS_PRESENT(pSet, iCpu))
113 {
114 /* Output the first cpu number. */
115 int off = 0;
116 if (cCpus != 0)
117 szTmp[off++] = ',';
118 off += vmmFormatTypeShortNumber(&szTmp[off], iCpu);
119
120 /* Check for sequence. */
121 uint32_t const iStart = ++iCpu;
122 while ( iCpu < RT_ELEMENTS(pSet->au32Bitmap) * 32
123 && VMCPUSET_IS_PRESENT(pSet, iCpu))
124 iCpu++;
125 if (iCpu != iStart)
126 {
127 szTmp[off++] = '-';
128 off += vmmFormatTypeShortNumber(&szTmp[off], iCpu);
129 }
130
131 /* Terminate and output. */
132 szTmp[off] = '\0';
133 cchRet += pfnOutput(pvArgOutput, szTmp, off);
134 }
135 iCpu++;
136 }
137
138 cchRet += pfnOutput(pvArgOutput, "}", 1);
139 NOREF(pvUser);
140 return cchRet;
141}
142
143
144/**
145 * Registers the VMM wide format types.
146 *
147 * Called by VMMR3Init, VMMR0Init and VMMRCInit.
148 */
149int vmmInitFormatTypes(void)
150{
151 int rc = VINF_SUCCESS;
152 if (ASMAtomicIncU32(&g_cFormatTypeUsers) == 1)
153 rc = RTStrFormatTypeRegister("vmcpuset", vmmFormatTypeVmCpuSet, NULL);
154 return rc;
155}
156
157
158#ifndef IN_RC
159/**
160 * Counterpart to vmmInitFormatTypes, called by VMMR3Term and VMMR0Term.
161 */
162void vmmTermFormatTypes(void)
163{
164 if (ASMAtomicDecU32(&g_cFormatTypeUsers) == 0)
165 RTStrFormatTypeDeregister("vmcpuset");
166}
167#endif
168
169
170/**
171 * Gets the bottom of the hypervisor stack - RC Ptr.
172 *
173 * (The returned address is not actually writable, only after it's decremented
174 * by a push/ret/whatever does it become writable.)
175 *
176 * @returns bottom of the stack.
177 * @param pVCpu The cross context virtual CPU structure.
178 */
179VMM_INT_DECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu)
180{
181 return (RTRCPTR)pVCpu->vmm.s.pbEMTStackBottomRC;
182}
183
184
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM     The cross context VM structure.
 * @internal
 */
VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM)
{
#if defined(IN_RING3)
    /* Ring-3 already maintains the thread <-> VCPU association. */
    return VMR3GetVMCPUId(pVM);

#elif defined(IN_RING0)
    /* Single VCPU VMs: trivially EMT(0). */
    if (pVM->cCpus == 1)
        return 0;

    /* Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        /* With preemption disabled, the current host CPU is stable and a
           running EMT will have recorded it in idHostCpu. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu->idCpu;
        }
    }

    /* RTThreadGetNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu->idCpu;
    }
    /* The calling thread is not an EMT of this VM. */
    return NIL_VMCPUID;

#else /* RC: Always EMT(0) */
    NOREF(pVM);
    return 0;
#endif
}
240
241
/**
 * Returns the VMCPU of the calling EMT.
 *
 * @returns The VMCPU pointer. NULL if not an EMT.
 *
 * @param   pVM     The cross context VM structure.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpu(PVM pVM)
{
#ifdef IN_RING3
    /* Resolve the VCPU ID via ring-3's own thread association table. */
    VMCPUID idCpu = VMR3GetVMCPUId(pVM);
    if (idCpu == NIL_VMCPUID)
        return NULL;
    Assert(idCpu < pVM->cCpus);
    return &pVM->aCpus[idCpu];

#elif defined(IN_RING0)
    /* Single VCPU VMs: trivially EMT(0). */
    if (pVM->cCpus == 1)
        return &pVM->aCpus[0];

    /*
     * Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        /* With preemption disabled, the current host CPU is stable and a
           running EMT will have recorded it in idHostCpu. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu;
        }
    }

    /* RTThreadGetNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common.
     * Use a map like GIP does that's indexed by the host CPU index. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu;
    }
    /* The calling thread is not an EMT of this VM. */
    return NULL;

#else /* RC: Always EMT(0) */
    return &pVM->aCpus[0];
#endif /* IN_RING0 */
}
302
303
304/**
305 * Returns the VMCPU of the first EMT thread.
306 *
307 * @returns The VMCPU pointer.
308 * @param pVM The cross context VM structure.
309 * @internal
310 */
311VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM)
312{
313 Assert(pVM->cCpus == 1);
314 return &pVM->aCpus[0];
315}
316
317
318/**
319 * Returns the VMCPU of the specified virtual CPU.
320 *
321 * @returns The VMCPU pointer. NULL if idCpu is invalid.
322 *
323 * @param pVM The cross context VM structure.
324 * @param idCpu The ID of the virtual CPU.
325 * @internal
326 */
327VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, RTCPUID idCpu)
328{
329 AssertReturn(idCpu < pVM->cCpus, NULL);
330 return &pVM->aCpus[idCpu];
331}
332
333
334/**
335 * Gets the VBOX_SVN_REV.
336 *
337 * This is just to avoid having to compile a bunch of big files
338 * and requires less Makefile mess.
339 *
340 * @returns VBOX_SVN_REV.
341 */
342VMM_INT_DECL(uint32_t) VMMGetSvnRev(void)
343{
344 return VBOX_SVN_REV;
345}
346
347
348/**
349 * Queries the current switcher
350 *
351 * @returns active switcher
352 * @param pVM The cross context VM structure.
353 */
354VMM_INT_DECL(VMMSWITCHER) VMMGetSwitcher(PVM pVM)
355{
356 return pVM->vmm.s.enmSwitcher;
357}
358
359
360/**
361 * Checks whether we're in a ring-3 call or not.
362 *
363 * @returns true / false.
364 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
365 * @thread EMT
366 */
367VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
368{
369#ifdef RT_ARCH_X86
370 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
371#else
372 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
373#endif
374}
375
376
/**
 * Returns the build type for matching components.
 *
 * The value is a magic base with feature bits OR'ed in so mismatched
 * binaries can be detected.
 *
 * @returns Build type value.
 */
uint32_t vmmGetBuildType(void)
{
    uint32_t uType = 0xbeef0000;
#ifdef DEBUG
    uType |= RT_BIT_32(0);          /* bit 0: debug build */
#endif
#ifdef VBOX_WITH_STATISTICS
    uType |= RT_BIT_32(1);          /* bit 1: statistics enabled */
#endif
    return uType;
}
393
394
395/**
396 * Patches the instructions necessary for making a hypercall to the hypervisor.
397 * Used by GIM.
398 *
399 * @returns VBox status code.
400 * @param pVM The cross context VM structure.
401 * @param pvBuf The buffer in the hypercall page(s) to be patched.
402 * @param cbBuf The size of the buffer.
403 * @param pcbWritten Where to store the number of bytes patched. This
404 * is reliably updated only when this function returns
405 * VINF_SUCCESS.
406 */
407VMM_INT_DECL(int) VMMPatchHypercall(PVM pVM, void *pvBuf, size_t cbBuf, size_t *pcbWritten)
408{
409 AssertReturn(pvBuf, VERR_INVALID_POINTER);
410 AssertReturn(pcbWritten, VERR_INVALID_POINTER);
411
412 NOREF(pVM);
413
414 if (ASMIsAmdCpu())
415 {
416 uint8_t abHypercall[] = { 0x0F, 0x01, 0xD9 }; /* VMMCALL */
417 if (RT_LIKELY(cbBuf >= sizeof(abHypercall)))
418 {
419 memcpy(pvBuf, abHypercall, sizeof(abHypercall));
420 *pcbWritten = sizeof(abHypercall);
421 return VINF_SUCCESS;
422 }
423 return VERR_BUFFER_OVERFLOW;
424 }
425 else
426 {
427 AssertReturn(ASMIsIntelCpu() || ASMIsViaCentaurCpu(), VERR_UNSUPPORTED_CPU);
428 uint8_t abHypercall[] = { 0x0F, 0x01, 0xC1 }; /* VMCALL */
429 if (RT_LIKELY(cbBuf >= sizeof(abHypercall)))
430 {
431 memcpy(pvBuf, abHypercall, sizeof(abHypercall));
432 *pcbWritten = sizeof(abHypercall);
433 return VINF_SUCCESS;
434 }
435 return VERR_BUFFER_OVERFLOW;
436 }
437}
438
439
440/**
441 * Notifies VMM that paravirtualized hypercalls are now enabled.
442 *
443 * @param pVCpu The cross context virtual CPU structure.
444 */
445VMM_INT_DECL(void) VMMHypercallsEnable(PVMCPU pVCpu)
446{
447 /* If there is anything to do for raw-mode, do it here. */
448#ifndef IN_RC
449 if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
450 HMHypercallsEnable(pVCpu);
451#endif
452}
453
454
455/**
456 * Notifies VMM that paravirtualized hypercalls are now disabled.
457 *
458 * @param pVCpu The cross context virtual CPU structure.
459 */
460VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
461{
462 /* If there is anything to do for raw-mode, do it here. */
463#ifndef IN_RC
464 if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
465 HMHypercallsDisable(pVCpu);
466#endif
467}
468
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette