VirtualBox

source: vbox/trunk/src/VBox/VMM/VMReq.cpp@ 30111

Last change on this file since 30111 was 30111, checked in by vboxsync, 14 years ago

iprt/asm.h,*: Revised the ASMAtomic*Ptr functions and macros. The new saves lots of unsafe (void * volatile *) casts as well as adding some type safety when using GCC (typeof rulez).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 46.8 KB
1/* $Id: VMReq.cpp 30111 2010-06-09 12:14:59Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VM
23#include <VBox/mm.h>
24#include <VBox/vmm.h>
25#include "VMInternal.h"
26#include <VBox/vm.h>
27#include <VBox/uvm.h>
28
29#include <VBox/err.h>
30#include <VBox/param.h>
31#include <VBox/log.h>
32#include <iprt/assert.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/time.h>
36#include <iprt/semaphore.h>
37#include <iprt/thread.h>
38
39
40/*******************************************************************************
41* Internal Functions *
42*******************************************************************************/
43static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
44
45
46/**
47 * Allocate and queue a call request.
48 *
49 * If it's desired to poll on the completion of the request, set cMillies
50 * to 0 and use VMR3ReqWait() to check for completion. Otherwise, use
51 * RT_INDEFINITE_WAIT.
52 * The returned request packet must be freed using VMR3ReqFree().
53 *
54 * @returns VBox status code.
55 * Will not return VERR_INTERRUPTED.
56 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
57 *
58 * @param pVM The VM handle.
59 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
60 * one of the following special values:
61 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
62 * @param ppReq Where to store the pointer to the request.
63 * This will be NULL or a valid request pointer no matter what happens.
64 * @param cMillies Number of milliseconds to wait for the request to
65 * be completed. Use RT_INDEFINITE_WAIT to only
66 * wait till it's completed.
67 * @param fFlags A combination of the VMREQFLAGS values.
68 * @param pfnFunction Pointer to the function to call.
69 * @param cArgs Number of arguments following in the ellipsis.
70 * @param ... Function arguments.
71 *
72 * @remarks See remarks on VMR3ReqCallVU.
73 */
74VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
75 PFNRT pfnFunction, unsigned cArgs, ...)
76{
77 va_list va;
78 va_start(va, cArgs);
79 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
80 va_end(va);
81 return rc;
82}
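/*
 * Illustrative sketch (editor's addition, not part of the original file): the
 * poll-style usage described above -- queue with cMillies=0, poll with
 * VMR3ReqWait(), and free the packet. The names myExampleWorker and
 * myExamplePoll are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(int) myExampleWorker(uintptr_t uArg)
{
    NOREF(uArg);
    return VINF_SUCCESS;
}

static int myExamplePoll(PVM pVM)
{
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, 0 /*cMillies*/, VMREQFLAGS_VBOX_STATUS,
                         (PFNRT)myExampleWorker, 1, (uintptr_t)42);
    while (rc == VERR_TIMEOUT)              /* not completed yet, keep polling */
        rc = VMR3ReqWait(pReq, 10 /*ms*/);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;                 /* the worker's own status code */
    VMR3ReqFree(pReq);                      /* the packet must always be freed */
    return rc;
}
#endif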
83
84
85/**
86 * Convenience wrapper for VMR3ReqCallU.
87 *
88 * This assumes (1) you're calling a function that returns a VBox status code,
89 * (2) that you want its return code on success, and (3) that you wish to wait
90 * forever for it to return.
91 *
92 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
93 * its status code is returned. Otherwise, the status of pfnFunction is
94 * returned.
95 *
96 * @param pVM Pointer to the shared VM structure.
97 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
98 * one of the following special values:
99 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
100 * @param pfnFunction Pointer to the function to call.
101 * @param cArgs Number of arguments following in the ellipsis.
102 * @param ... Function arguments.
103 *
104 * @remarks See remarks on VMR3ReqCallVU.
105 */
106VMMR3DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
107{
108 PVMREQ pReq;
109 va_list va;
110 va_start(va, cArgs);
111 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
112 pfnFunction, cArgs, va);
113 va_end(va);
114 if (RT_SUCCESS(rc))
115 rc = pReq->iStatus;
116 VMR3ReqFree(pReq);
117 return rc;
118}
119
120
121/**
122 * Convenience wrapper for VMR3ReqCallU.
123 *
124 * This assumes (1) you're calling a function that returns a VBox status code,
125 * (2) that you want its return code on success, and (3) that you wish to wait
126 * forever for it to return.
127 *
128 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
129 * its status code is returned. Otherwise, the status of pfnFunction is
130 * returned.
131 *
132 * @param pUVM Pointer to the user mode VM structure.
133 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
134 * one of the following special values:
135 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
136 * @param pfnFunction Pointer to the function to call.
137 * @param cArgs Number of arguments following in the ellipsis.
138 * @param ... Function arguments.
139 *
140 * @remarks See remarks on VMR3ReqCallVU.
141 */
142VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
143{
144 PVMREQ pReq;
145 va_list va;
146 va_start(va, cArgs);
147 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
148 pfnFunction, cArgs, va);
149 va_end(va);
150 if (RT_SUCCESS(rc))
151 rc = pReq->iStatus;
152 VMR3ReqFree(pReq);
153 return rc;
154}
155
156
157/**
158 * Convenience wrapper for VMR3ReqCallU.
159 *
160 * This assumes (1) you're calling a function that returns a VBox status code
161 * and that you do not wish to wait for it to complete.
162 *
163 * @returns VBox status code returned by VMR3ReqCallVU.
164 *
165 * @param pVM Pointer to the shared VM structure.
166 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
167 * one of the following special values:
168 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
169 * @param pfnFunction Pointer to the function to call.
170 * @param cArgs Number of arguments following in the ellipsis.
171 * @param ... Function arguments.
172 *
173 * @remarks See remarks on VMR3ReqCallVU.
174 */
175VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
176{
177 va_list va;
178 va_start(va, cArgs);
179 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
180 pfnFunction, cArgs, va);
181 va_end(va);
182 return rc;
183}
184
185
186/**
187 * Convenience wrapper for VMR3ReqCallU.
188 *
189 * This assumes (1) you're calling a function that returns a VBox status code
190 * and that you do not wish to wait for it to complete.
191 *
192 * @returns VBox status code returned by VMR3ReqCallVU.
193 *
194 * @param pUVM Pointer to the user mode VM structure.
195 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
196 * one of the following special values:
197 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
198 * @param pfnFunction Pointer to the function to call.
199 * @param cArgs Number of arguments following in the ellipsis.
200 * @param ... Function arguments.
201 *
202 * @remarks See remarks on VMR3ReqCallVU.
203 */
204VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
205{
206 va_list va;
207 va_start(va, cArgs);
208 int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
209 pfnFunction, cArgs, va);
210 va_end(va);
211 return rc;
212}
213
214
215/**
216 * Convenience wrapper for VMR3ReqCallU.
217 *
218 * This assumes (1) you're calling a function that returns void, and (2) that
219 * you wish to wait forever for it to return.
220 *
221 * @returns VBox status code of VMR3ReqCallVU.
222 *
223 * @param pVM Pointer to the shared VM structure.
224 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
225 * one of the following special values:
226 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
227 * @param pfnFunction Pointer to the function to call.
228 * @param cArgs Number of arguments following in the ellipsis.
229 * @param ... Function arguments.
230 *
231 * @remarks See remarks on VMR3ReqCallVU.
232 */
233VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
234{
235 PVMREQ pReq;
236 va_list va;
237 va_start(va, cArgs);
238 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
239 pfnFunction, cArgs, va);
240 va_end(va);
241 VMR3ReqFree(pReq);
242 return rc;
243}
244
245
246/**
247 * Convenience wrapper for VMR3ReqCallU.
248 *
249 * This assumes (1) you're calling a function that returns void, and (2) that
250 * you wish to wait forever for it to return.
251 *
252 * @returns VBox status code of VMR3ReqCallVU.
253 *
254 * @param pUVM Pointer to the user mode VM structure.
255 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
256 * one of the following special values:
257 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
258 * @param pfnFunction Pointer to the function to call.
259 * @param cArgs Number of arguments following in the ellipsis.
260 * @param ... Function arguments.
261 *
262 * @remarks See remarks on VMR3ReqCallVU.
263 */
264VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
265{
266 PVMREQ pReq;
267 va_list va;
268 va_start(va, cArgs);
269 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
270 pfnFunction, cArgs, va);
271 va_end(va);
272 VMR3ReqFree(pReq);
273 return rc;
274}
275
276
277/**
278 * Convenience wrapper for VMR3ReqCallU.
279 *
280 * This assumes (1) you're calling a function that returns void, and (2) that
281 * you do not wish to wait for it to complete.
282 *
283 * @returns VBox status code of VMR3ReqCallVU.
284 *
285 * @param pVM Pointer to the shared VM structure.
286 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
287 * one of the following special values:
288 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
289 * @param pfnFunction Pointer to the function to call.
290 * @param cArgs Number of arguments following in the ellipsis.
291 * @param ... Function arguments.
292 *
293 * @remarks See remarks on VMR3ReqCallVU.
294 */
295VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
296{
297 PVMREQ pReq;
298 va_list va;
299 va_start(va, cArgs);
300 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
301 pfnFunction, cArgs, va);
302 va_end(va);
303 VMR3ReqFree(pReq);
304 return rc;
305}
306
307
308/**
309 * Convenience wrapper for VMR3ReqCallU.
310 *
311 * This assumes (1) you're calling a function that returns void, and (2) that
312 * you do not wish to wait for it to complete.
313 *
314 * @returns VBox status code of VMR3ReqCallVU.
315 *
316 * @param pUVM Pointer to the user mode VM structure.
317 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
318 * one of the following special values:
319 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
320 * @param pfnFunction Pointer to the function to call.
321 * @param cArgs Number of arguments following in the ellipsis.
322 * @param ... Function arguments.
323 *
324 * @remarks See remarks on VMR3ReqCallVU.
325 */
326VMMR3DECL(int) VMR3ReqCallVoidNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
327{
328 PVMREQ pReq;
329 va_list va;
330 va_start(va, cArgs);
331 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
332 pfnFunction, cArgs, va);
333 va_end(va);
334 VMR3ReqFree(pReq);
335 return rc;
336}
337
338
339/**
340 * Allocate and queue a call request to a void function.
341 *
342 * If it's desired to poll on the completion of the request, set cMillies
343 * to 0 and use VMR3ReqWait() to check for completion. Otherwise, use
344 * RT_INDEFINITE_WAIT.
345 * The returned request packet must be freed using VMR3ReqFree().
346 *
347 * @returns VBox status code.
348 * Will not return VERR_INTERRUPTED.
349 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
350 *
351 * @param pUVM Pointer to the user mode VM structure.
352 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
353 * one of the following special values:
354 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
355 * @param ppReq Where to store the pointer to the request.
356 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
357 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
358 * @param cMillies Number of milliseconds to wait for the request to
359 * be completed. Use RT_INDEFINITE_WAIT to only
360 * wait till it's completed.
361 * @param fFlags A combination of the VMREQFLAGS values.
362 * @param pfnFunction Pointer to the function to call.
363 * @param cArgs Number of arguments following in the ellipsis.
364 * @param ... Function arguments.
365 *
366 * @remarks See remarks on VMR3ReqCallVU.
367 */
368VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
369 PFNRT pfnFunction, unsigned cArgs, ...)
370{
371 va_list va;
372 va_start(va, cArgs);
373 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
374 va_end(va);
375 return rc;
376}
377
378
379/**
380 * Allocate and queue a call request.
381 *
382 * If it's desired to poll on the completion of the request, set cMillies
383 * to 0 and use VMR3ReqWait() to check for completion. Otherwise, use
384 * RT_INDEFINITE_WAIT.
385 * The returned request packet must be freed using VMR3ReqFree().
386 *
387 * @returns VBox status code.
388 * Will not return VERR_INTERRUPTED.
389 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
390 *
391 * @param pUVM Pointer to the user mode VM structure.
392 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
393 * one of the following special values:
394 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
395 * @param ppReq Where to store the pointer to the request.
396 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
397 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
398 * @param cMillies Number of milliseconds to wait for the request to
399 * be completed. Use RT_INDEFINITE_WAIT to only
400 * wait till it's completed.
401 * @param pfnFunction Pointer to the function to call.
402 * @param fFlags A combination of the VMREQFLAGS values.
403 * @param cArgs Number of arguments following in the ellipsis.
404 * Anything which differs in size from uintptr_t will cause trouble, so don't try!
405 * @param Args Argument vector.
406 *
407 * @remarks Caveats:
408 * - Do not pass anything which is larger than an uintptr_t.
409 * - 64-bit integers are larger than uintptr_t on 32-bit hosts.
410 * Pass integers > 32-bit by reference (pointers).
411 * - Don't use NULL since it should be the integer 0 in C++ and may
412 * therefore end up with garbage in the bits 63:32 on 64-bit
413 * hosts because 'int' is 32-bit.
414 * Use (void *)NULL or (uintptr_t)0 instead of NULL.
415 */
416VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
417 PFNRT pfnFunction, unsigned cArgs, va_list Args)
418{
419 LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));
420
421 /*
422 * Validate input.
423 */
424 AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
425 AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
426 AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)), VERR_INVALID_PARAMETER);
427 if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
428 {
429 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
430 *ppReq = NULL;
431 }
432 PVMREQ pReq = NULL;
433 AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
434 ("cArg=%d\n", cArgs),
435 VERR_TOO_MUCH_DATA);
436
437 /*
438 * Allocate request
439 */
440 int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
441 if (RT_FAILURE(rc))
442 return rc;
443
444 /*
445 * Initialize the request data.
446 */
447 pReq->fFlags = fFlags;
448 pReq->u.Internal.pfn = pfnFunction;
449 pReq->u.Internal.cArgs = cArgs;
450 for (unsigned iArg = 0; iArg < cArgs; iArg++)
451 pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);
452
453 /*
454 * Queue the request and return.
455 */
456 rc = VMR3ReqQueue(pReq, cMillies);
457 if ( RT_FAILURE(rc)
458 && rc != VERR_TIMEOUT)
459 {
460 VMR3ReqFree(pReq);
461 pReq = NULL;
462 }
463 if (!(fFlags & VMREQFLAGS_NO_WAIT))
464 {
465 *ppReq = pReq;
466 LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
467 }
468 else
469 LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
470 Assert(rc != VERR_INTERRUPTED);
471 return rc;
472}
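/*
 * Illustrative sketch (editor's addition, not part of the original file): the
 * 64-bit caveat from the remarks above in practice. On a 32-bit host a
 * uint64_t does not fit in a uintptr_t argument slot, so it is passed by
 * reference. The names myExampleSetLimit and myExampleCall are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(int) myExampleSetLimit(PVM pVM, uint64_t *pcbLimit)
{
    NOREF(pVM); NOREF(pcbLimit);
    return VINF_SUCCESS;
}

static int myExampleCall(PVM pVM)
{
    uint64_t cbLimit = UINT64_C(0x100000000);   /* does not fit in 32 bits */
    return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)myExampleSetLimit,
                           2, pVM, &cbLimit);   /* pass the pointer, not the value */
}
#endif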
473
474
475/**
476 * Joins the list pList with whatever is linked up at *pHead.
477 */
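/*
 * Editor's note (not in the original source): the loop below is a lock-free
 * list join. pList is atomically exchanged into *ppHead; if the slot was
 * empty, we're done. Otherwise the old head list is linked in front of pList
 * and a compare-and-swap attempts to publish the merged list. If a concurrent
 * allocation changed the slot in the meantime, the link is undone and the
 * merge is retried on the next iteration.
 */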
478static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
479{
480 for (unsigned cIterations = 0;; cIterations++)
481 {
482 PVMREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PVMREQ);
483 if (!pHead)
484 return;
485 PVMREQ pTail = pHead;
486 while (pTail->pNext)
487 pTail = pTail->pNext;
488 ASMAtomicWritePtr(&pTail->pNext, pList);
489 ASMCompilerBarrier();
490 if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
491 return;
492 ASMAtomicWritePtr(&pTail->pNext, NULL);
493 ASMCompilerBarrier();
494 if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
495 return;
496 pList = pHead;
497 Assert(cIterations != 32);
498 Assert(cIterations != 64);
499 }
500}
501
502
503/**
504 * Joins the list pList with one of the free request lists, splitting it up first if it is too long.
505 */
506static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
507{
508 /*
509 * Split the list if it's too long.
510 */
511 unsigned cReqs = 1;
512 PVMREQ pTail = pList;
513 while (pTail->pNext)
514 {
515 if (cReqs++ > 25)
516 {
517 const uint32_t i = pVMInt->iReqFree;
518 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
519
520 pTail->pNext = NULL;
521 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
522 return;
523 }
524 pTail = pTail->pNext;
525 }
526 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
527}
528
529
530/**
531 * Allocates a request packet.
532 *
533 * The caller allocates a request packet, fills in the request data
534 * union and queues the request.
535 *
536 * @returns VBox status code.
537 *
538 * @param pVM VM handle.
539 * @param ppReq Where to store the pointer to the allocated packet.
540 * @param enmType Package type.
541 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
542 * one of the following special values:
543 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
544 */
545VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
546{
547 return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, idDstCpu);
548}
549
550
551/**
552 * Allocates a request packet.
553 *
554 * The caller allocates a request packet, fills in the request data
555 * union and queues the request.
556 *
557 * @returns VBox status code.
558 *
559 * @param pUVM Pointer to the user mode VM structure.
560 * @param ppReq Where to store the pointer to the allocated packet.
561 * @param enmType Package type.
562 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
563 * one of the following special values:
564 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
565 */
566VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
567{
568 /*
569 * Validate input.
570 */
571 AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
572 ("Invalid package type %d valid range %d-%d inclusivly.\n",
573 enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
574 VERR_VM_REQUEST_INVALID_TYPE);
575 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
576 AssertMsgReturn( idDstCpu == VMCPUID_ANY
577 || idDstCpu == VMCPUID_ANY_QUEUE
578 || idDstCpu < pUVM->cCpus
579 || idDstCpu == VMCPUID_ALL
580 || idDstCpu == VMCPUID_ALL_REVERSE,
581 ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);
582
583 /*
584 * Try to get a recycled packet.
585 * While this could all be solved with a single list with a lock, it's a sport
586 * of mine to avoid locks.
587 */
588 int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
589 while (--cTries >= 0)
590 {
591 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
592#if 0 /* sad, but this won't work safely because of the reading of pReq->pNext. */
593 PVMREQ pNext = NULL;
594 PVMREQ pReq = *ppHead;
595 if ( pReq
596 && !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq)
597 && (pReq = *ppHead)
598 && !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq))
599 pReq = NULL;
600 if (pReq)
601 {
602 Assert(pReq->pNext == pNext); NOREF(pReq);
603#else
604 PVMREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PVMREQ);
605 if (pReq)
606 {
607 PVMREQ pNext = pReq->pNext;
608 if ( pNext
609 && !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
610 {
611 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
612 vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
613 }
614#endif
615 ASMAtomicDecU32(&pUVM->vm.s.cReqFree);
616
617 /*
618 * Make sure the event sem is not signaled.
619 */
620 if (!pReq->fEventSemClear)
621 {
622 int rc = RTSemEventWait(pReq->EventSem, 0);
623 if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
624 {
625 /*
626 * This shall not happen, but if it does we'll just destroy
627 * the semaphore and create a new one.
628 */
629 AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
630 RTSemEventDestroy(pReq->EventSem);
631 rc = RTSemEventCreate(&pReq->EventSem);
632 AssertRC(rc);
633 if (RT_FAILURE(rc))
634 return rc;
635#if 0 ///@todo @bugref{4725} - def RT_LOCK_STRICT
636 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
637 RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
638#endif
639 }
640 pReq->fEventSemClear = true;
641 }
642 else
643 Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);
644
645 /*
646 * Initialize the packet and return it.
647 */
648 Assert(pReq->enmType == VMREQTYPE_INVALID);
649 Assert(pReq->enmState == VMREQSTATE_FREE);
650 Assert(pReq->pUVM == pUVM);
651 ASMAtomicXchgSize(&pReq->pNext, NULL);
652 pReq->enmState = VMREQSTATE_ALLOCATED;
653 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
654 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
655 pReq->enmType = enmType;
656 pReq->idDstCpu = idDstCpu;
657
658 *ppReq = pReq;
659 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
660 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
661 return VINF_SUCCESS;
662 }
663 }
664
665 /*
666 * Ok, allocate one.
667 */
668 PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
669 if (!pReq)
670 return VERR_NO_MEMORY;
671
672 /*
673 * Create the semaphore.
674 */
675 int rc = RTSemEventCreate(&pReq->EventSem);
676 AssertRC(rc);
677 if (RT_FAILURE(rc))
678 {
679 MMR3HeapFree(pReq);
680 return rc;
681 }
682#if 0 ///@todo @bugref{4725} - def RT_LOCK_STRICT
683 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
684 RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
685#endif
686
687 /*
688 * Initialize the packet and return it.
689 */
690 pReq->pNext = NULL;
691 pReq->pUVM = pUVM;
692 pReq->enmState = VMREQSTATE_ALLOCATED;
693 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
694 pReq->fEventSemClear = true;
695 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
696 pReq->enmType = enmType;
697 pReq->idDstCpu = idDstCpu;
698
699 *ppReq = pReq;
700 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
701 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
702 return VINF_SUCCESS;
703}
704
705
706/**
707 * Free a request packet.
708 *
709 * @returns VBox status code.
710 *
711 * @param pReq Package to free.
712 * @remark The request packet must be in allocated or completed state!
713 */
714VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
715{
716 /*
717 * Ignore NULL (all free functions should do this imho).
718 */
719 if (!pReq)
720 return VINF_SUCCESS;
721
722 /*
723 * Check packet state.
724 */
725 switch (pReq->enmState)
726 {
727 case VMREQSTATE_ALLOCATED:
728 case VMREQSTATE_COMPLETED:
729 break;
730 default:
731 AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
732 return VERR_VM_REQUEST_STATE;
733 }
734
735 /*
736 * Make it a free packet and put it into one of the free packet lists.
737 */
738 pReq->enmState = VMREQSTATE_FREE;
739 pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
740 pReq->enmType = VMREQTYPE_INVALID;
741
742 PUVM pUVM = pReq->pUVM;
743 STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
744
745 if (pUVM->vm.s.cReqFree < 128)
746 {
747 ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
748 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
749 PVMREQ pNext;
750 do
751 {
752 pNext = ASMAtomicUoReadPtrT(ppHead, PVMREQ);
753 ASMAtomicWritePtr(&pReq->pNext, pNext);
754 ASMCompilerBarrier();
755 } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));
756 }
757 else
758 {
759 STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
760 RTSemEventDestroy(pReq->EventSem);
761 MMR3HeapFree(pReq);
762 }
763 return VINF_SUCCESS;
764}
765
766
767/**
768 * Queue a request.
769 *
770 * The request must be allocated using VMR3ReqAlloc() and contain
771 * all the required data.
772 * If it's desired to poll on the completion of the request, set cMillies
773 * to 0 and use VMR3ReqWait() to check for completion. Otherwise, use
774 * RT_INDEFINITE_WAIT.
775 *
776 * @returns VBox status code.
777 * Will not return VERR_INTERRUPTED.
778 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
779 *
780 * @param pReq The request to queue.
781 * @param cMillies Number of milliseconds to wait for the request to
782 * be completed. Use RT_INDEFINITE_WAIT to only
783 * wait till it's completed.
784 */
785VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, RTMSINTERVAL cMillies)
786{
787 LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
788 /*
789 * Verify the supplied package.
790 */
791 AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
792 AssertMsgReturn( VALID_PTR(pReq->pUVM)
793 && !pReq->pNext
794 && pReq->EventSem != NIL_RTSEMEVENT,
795 ("Invalid request package! Anyone cooking their own packages???\n"),
796 VERR_VM_REQUEST_INVALID_PACKAGE);
797 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
798 && pReq->enmType < VMREQTYPE_MAX,
799 ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
800 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
801 VERR_VM_REQUEST_INVALID_TYPE);
802 Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)));
803
804 /*
805 * Are we the EMT or not?
806 * Also, store pUVM (and fFlags) locally since pReq may be invalid after queuing it.
807 */
808 int rc = VINF_SUCCESS;
809 PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM; /* volatile paranoia */
810 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
811
812 if (pReq->idDstCpu == VMCPUID_ALL)
813 {
814 /* One-by-one. */
815 Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
816 for (unsigned i = 0; i < pUVM->cCpus; i++)
817 {
818 /* Reinit some members. */
819 pReq->enmState = VMREQSTATE_ALLOCATED;
820 pReq->idDstCpu = i;
821 rc = VMR3ReqQueue(pReq, cMillies);
822 if (RT_FAILURE(rc))
823 break;
824 }
825 }
826 else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
827 {
828 /* One-by-one. */
829 Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
830 for (int i = pUVM->cCpus-1; i >= 0; i--)
831 {
832 /* Reinit some members. */
833 pReq->enmState = VMREQSTATE_ALLOCATED;
834 pReq->idDstCpu = i;
835 rc = VMR3ReqQueue(pReq, cMillies);
836 if (RT_FAILURE(rc))
837 break;
838 }
839 }
840 else if ( pReq->idDstCpu != VMCPUID_ANY /* for a specific VMCPU? */
841 && pReq->idDstCpu != VMCPUID_ANY_QUEUE
842 && ( !pUVCpu /* and it's not the current thread. */
843 || pUVCpu->idCpu != pReq->idDstCpu))
844 {
845 VMCPUID idTarget = pReq->idDstCpu; Assert(idTarget < pUVM->cCpus);
846 PVMCPU pVCpu = &pUVM->pVM->aCpus[idTarget];
847 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
848
849 /* Fetch the right UVMCPU */
850 pUVCpu = &pUVM->aCpus[idTarget];
851
852 /*
853 * Insert it.
854 */
855 pReq->enmState = VMREQSTATE_QUEUED;
856 PVMREQ pNext;
857 do
858 {
859 pNext = ASMAtomicUoReadPtrT(&pUVCpu->vm.s.pReqs, PVMREQ);
860 ASMAtomicWritePtr(&pReq->pNext, pNext);
861 ASMCompilerBarrier();
862 } while (!ASMAtomicCmpXchgPtr(&pUVCpu->vm.s.pReqs, pReq, pNext));
863
864 /*
865 * Notify EMT.
866 */
867 if (pUVM->pVM)
868 VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
869 VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
870
871 /*
872 * Wait and return.
873 */
874 if (!(fFlags & VMREQFLAGS_NO_WAIT))
875 rc = VMR3ReqWait(pReq, cMillies);
876 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
877 }
878 else if ( ( pReq->idDstCpu == VMCPUID_ANY
879 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
880 || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
881 {
882 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
883
884 Assert(pReq->idDstCpu != VMCPUID_ANY_QUEUE || pUVCpu);
885
886 /*
887 * Insert it.
888 */
889 pReq->enmState = VMREQSTATE_QUEUED;
890 PVMREQ pNext;
891 do
892 {
893 pNext = ASMAtomicUoReadPtrT(&pUVM->vm.s.pReqs, PVMREQ);
894 ASMAtomicWritePtr(&pReq->pNext, pNext);
895 ASMCompilerBarrier();
896 } while (!ASMAtomicCmpXchgPtr(&pUVM->vm.s.pReqs, pReq, pNext));
897
898 /*
899 * Notify EMT.
900 */
901 if (pUVM->pVM)
902 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
903 VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
904
905 /*
906 * Wait and return.
907 */
908 if (!(fFlags & VMREQFLAGS_NO_WAIT))
909 rc = VMR3ReqWait(pReq, cMillies);
910 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
911 }
912 else
913 {
914 Assert(pUVCpu);
915
916 /*
917 * The requester was an EMT, just execute it.
918 */
919 pReq->enmState = VMREQSTATE_QUEUED;
920 rc = vmR3ReqProcessOneU(pUVM, pReq);
921 LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
922 }
923 return rc;
924}
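/*
 * Illustrative sketch (editor's addition, not part of the original file): the
 * manual allocate/fill/queue/free flow which the validation above guards.
 * Real callers normally use the VMR3ReqCall* wrappers; the names
 * myExampleQueue and pfnWorker are hypothetical.
 */
#if 0 /* example only */
static int myExampleQueue(PUVM pUVM, PFNRT pfnWorker)
{
    PVMREQ pReq;
    int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, VMCPUID_ANY);
    if (RT_FAILURE(rc))
        return rc;
    pReq->fFlags           = VMREQFLAGS_VBOX_STATUS;
    pReq->u.Internal.pfn   = pfnWorker;
    pReq->u.Internal.cArgs = 0;
    rc = VMR3ReqQueue(pReq, RT_INDEFINITE_WAIT);    /* queues and waits for completion */
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;                         /* the worker's status code */
    VMR3ReqFree(pReq);
    return rc;
}
#endif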
925
926
927/**
928 * Wait for a request to be completed.
929 *
930 * @returns VBox status code.
931 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
932 *
933 * @param pReq The request to wait for.
934 * @param cMillies Number of milliseconds to wait.
935 * Use RT_INDEFINITE_WAIT to only wait till it's completed.
936 */
937VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, RTMSINTERVAL cMillies)
938{
939 LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));
940
941 /*
942 * Verify the supplied package.
943 */
944 AssertMsgReturn( pReq->enmState == VMREQSTATE_QUEUED
945 || pReq->enmState == VMREQSTATE_PROCESSING
946 || pReq->enmState == VMREQSTATE_COMPLETED,
947 ("Invalid state %d\n", pReq->enmState),
948 VERR_VM_REQUEST_STATE);
949 AssertMsgReturn( VALID_PTR(pReq->pUVM)
950 && pReq->EventSem != NIL_RTSEMEVENT,
951 ("Invalid request package! Anyone cooking their own packages???\n"),
952 VERR_VM_REQUEST_INVALID_PACKAGE);
953 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
954 && pReq->enmType < VMREQTYPE_MAX,
955 ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
956 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
957 VERR_VM_REQUEST_INVALID_TYPE);
958
959 /*
960 * Check for deadlock condition
961 */
962 PUVM pUVM = pReq->pUVM;
963 NOREF(pUVM);
964
965 /*
966 * Wait on the package.
967 */
968 int rc;
969 if (cMillies != RT_INDEFINITE_WAIT)
970 rc = RTSemEventWait(pReq->EventSem, cMillies);
971 else
972 {
973 do
974 {
975 rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
976 Assert(rc != VERR_TIMEOUT);
977 } while ( pReq->enmState != VMREQSTATE_COMPLETED
978 && pReq->enmState != VMREQSTATE_INVALID);
979 }
980 if (RT_SUCCESS(rc))
981 ASMAtomicXchgSize(&pReq->fEventSemClear, true);
982 if (pReq->enmState == VMREQSTATE_COMPLETED)
983 rc = VINF_SUCCESS;
984 LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
985 Assert(rc != VERR_INTERRUPTED);
986 return rc;
987}
988
989
990/**
991 * VMR3ReqProcessU helper that handles cases where there is more than one
992 * pending request.
993 *
994 * @returns The oldest request.
995 * @param pUVM Pointer to the user mode VM structure
996 * @param idDstCpu VMCPUID_ANY or virtual CPU ID.
997 * @param pReqList The list of requests.
998 * @param ppReqs Pointer to the list head.
999 */
1000static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ pReqList, PVMREQ volatile *ppReqs)
1001{
1002 STAM_COUNTER_INC(&pUVM->vm.s.StatReqMoreThan1);
1003 /* Chop off the last one (pReq). */
1004 PVMREQ pPrev;
1005 PVMREQ pReqRet = pReqList;
1006 do
1007 {
1008 pPrev = pReqRet;
1009 pReqRet = pReqRet->pNext;
1010 } while (pReqRet->pNext);
1011 ASMAtomicWritePtr(&pPrev->pNext, NULL);
1012
1013 /* Push the others back onto the list (end of it). */
1014 Log2(("VMR3ReqProcess: Pushing back %p %p...\n", pReqList, pReqList->pNext));
1015 if (RT_UNLIKELY(!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL)))
1016 {
1017 STAM_COUNTER_INC(&pUVM->vm.s.StatReqPushBackRaces);
1018 do
1019 {
1020 ASMNopPause();
1021 PVMREQ pReqList2 = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
1022 if (pReqList2)
1023 {
1024 PVMREQ pLast = pReqList2;
1025 while (pLast->pNext)
1026 pLast = pLast->pNext;
1027 ASMAtomicWritePtr(&pLast->pNext, pReqList);
1028 pReqList = pReqList2;
1029 }
1030 } while (!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL));
1031 }
1032
1033 if (RT_LIKELY(pUVM->pVM))
1034 {
1035 if (idDstCpu == VMCPUID_ANY)
1036 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
1037 else
1038 VMCPU_FF_SET(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
1039 }
1040
1041 return pReqRet;
1042}
1043
1044
1045/**
1046 * Process pending request(s).
1047 *
1048 * This function is called from a forced action handler in the EMT
1049 * or from one of the EMT loops.
1050 *
1051 * @returns VBox status code.
1052 *
1053 * @param pUVM Pointer to the user mode VM structure.
1054 * @param idDstCpu Pass VMCPUID_ANY to process the common request queue,
1055 * or the CPU ID for a CPU-specific one. In the latter
1056 * case the calling thread must be the EMT of that CPU.
1057 *
1058 * @note SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
1059 *
1060 * @remarks This was made reentrant for
1061 */
1062VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu)
1063{
1064 LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));
1065
1066 /*
1067 * Process loop.
1068 *
1069 * We do not repeat the outer loop if we've got an informational status code
1070 * since that code needs processing by our caller.
1071 */
1072 int rc = VINF_SUCCESS;
1073 while (rc <= VINF_SUCCESS)
1074 {
1075 /*
1076 * Get the pending requests.
1077 * If there are more than one request, unlink the oldest and put the
1078 * rest back so that we're reentrant.
1079 */
1080 PVMREQ volatile *ppReqs;
1081 if (idDstCpu == VMCPUID_ANY)
1082 {
1083 ppReqs = &pUVM->vm.s.pReqs;
1084 if (RT_LIKELY(pUVM->pVM))
1085 VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
1086 }
1087 else
1088 {
1089 Assert(idDstCpu < pUVM->cCpus);
1090 Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
1091 ppReqs = &pUVM->aCpus[idDstCpu].vm.s.pReqs;
1092 if (RT_LIKELY(pUVM->pVM))
1093 VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
1094 }
1095
1096 PVMREQ pReq = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
1097 if (!pReq)
1098 break;
1099 if (RT_UNLIKELY(pReq->pNext))
1100 pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppReqs);
1101
1102 /*
1103 * Process the request.
1104 * Note! The status code handling here is extremely important and yet very
1105 * fragile.
1106 */
1107 STAM_COUNTER_INC(&pUVM->vm.s.StatReqProcessed);
1108 int rc2 = vmR3ReqProcessOneU(pUVM, pReq);
1109 if ( rc2 >= VINF_EM_FIRST
1110 && rc2 <= VINF_EM_LAST
1111 && ( rc == VINF_SUCCESS
1112 || rc2 < rc) )
1113 rc = rc2;
1114 /** @todo may have to abort processing to propagate EM scheduling status codes
1115 * up to the caller... See the ugly hacks after VMMR3EmtRendezvousFF
1116 * and VMR3ReqProcessU in EM.cpp. */
1117 }
1118
1119 LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
1120 return rc;
1121}
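/*
 * Illustrative sketch (editor's addition, not part of the original file): how
 * an EMT loop typically consumes the force-action flag set by VMR3ReqQueue,
 * assuming this era's VM_FF_ISPENDING macro; the surrounding loop and the rc
 * variable are hypothetical.
 */
#if 0 /* example only */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        int rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
        if (rc2 > VINF_SUCCESS && rc == VINF_SUCCESS)
            rc = rc2;   /* informational codes must reach the EM loop */
    }
#endif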
1122
1123
1124/**
1125 * Process one request.
1126 *
1127 * @returns VBox status code.
1128 *
1129 * @param pUVM Pointer to the user mode VM structure.
1130 * @param pReq Request packet to process.
1131 */
1132static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq)
1133{
1134 LogFlow(("vmR3ReqProcessOneU: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));
1135
1136 /*
1137 * Process the request.
1138 */
1139 Assert(pReq->enmState == VMREQSTATE_QUEUED);
1140 pReq->enmState = VMREQSTATE_PROCESSING;
1141 int rcRet = VINF_SUCCESS; /* the return code of this function. */
1142 int rcReq = VERR_NOT_IMPLEMENTED; /* the request status. */
1143 switch (pReq->enmType)
1144 {
1145 /*
1146 * A packed down call frame.
1147 */
1148 case VMREQTYPE_INTERNAL:
1149 {
1150 uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
1151 union
1152 {
1153 PFNRT pfn;
1154 DECLCALLBACKMEMBER(int, pfn00)(void);
1155 DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
1156 DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
1157 DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
1158 DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1159 DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1160 DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1161 DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1162 DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1163 DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1164 DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1165 DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1166 DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1167 } u;
1168 u.pfn = pReq->u.Internal.pfn;
1169#ifdef RT_ARCH_AMD64
1170 switch (pReq->u.Internal.cArgs)
1171 {
1172 case 0: rcRet = u.pfn00(); break;
1173 case 1: rcRet = u.pfn01(pauArgs[0]); break;
1174 case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
1175 case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
1176 case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
1177 case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
1178 case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
1179 case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
1180 case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
1181 case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
1182 case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
1183 case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
1184 case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
1185 default:
1186 AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
1187 rcRet = rcReq = VERR_INTERNAL_ERROR;
1188 break;
1189 }
1190#else /* x86: */
1191 size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
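/*
 * Editor's note (not in the original source): both the GCC and the MSC
 * variants below copy the cArgs uintptr_t arguments onto a freshly reserved,
 * 16-byte aligned stack area with 'rep movs', call the function, and then
 * restore the saved stack pointer.
 */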
1192# ifdef __GNUC__
1193 __asm__ __volatile__("movl %%esp, %%edx\n\t"
1194 "subl %2, %%esp\n\t"
1195 "andl $0xfffffff0, %%esp\n\t"
1196 "shrl $2, %2\n\t"
1197 "movl %%esp, %%edi\n\t"
1198 "rep movsl\n\t"
1199 "movl %%edx, %%edi\n\t"
1200 "call *%%eax\n\t"
1201 "mov %%edi, %%esp\n\t"
1202 : "=a" (rcRet),
1203 "=S" (pauArgs),
1204 "=c" (cbArgs)
1205 : "0" (u.pfn),
1206 "1" (pauArgs),
1207 "2" (cbArgs)
1208 : "edi", "edx");
1209# else
1210 __asm
1211 {
1212 xor edx, edx /* just mess it up. */
1213 mov eax, u.pfn
1214 mov ecx, cbArgs
1215 shr ecx, 2
1216 mov esi, pauArgs
1217 mov ebx, esp
1218 sub esp, cbArgs
1219 and esp, 0xfffffff0
1220 mov edi, esp
1221 rep movsd
1222 call eax
1223 mov esp, ebx
1224 mov rcRet, eax
1225 }
1226# endif
1227#endif /* x86 */
1228 if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
1229 rcRet = VINF_SUCCESS;
1230 rcReq = rcRet;
1231 break;
1232 }
1233
1234 default:
1235 AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
1236 rcReq = VERR_NOT_IMPLEMENTED;
1237 break;
1238 }
1239
1240 /*
1241 * Complete the request.
1242 */
1243 pReq->iStatus = rcReq;
1244 pReq->enmState = VMREQSTATE_COMPLETED;
1245 if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
1246 {
1247 /* Free the packet, nobody is waiting. */
1248 LogFlow(("vmR3ReqProcessOneU: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
1249 pReq, rcReq, rcRet));
1250 VMR3ReqFree(pReq);
1251 }
1252 else
1253 {
1254 /* Notify the waiter and let him free up the packet. */
1255 LogFlow(("vmR3ReqProcessOneU: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
1256 pReq, rcReq, rcRet));
1257 ASMAtomicXchgSize(&pReq->fEventSemClear, false);
1258 int rc2 = RTSemEventSignal(pReq->EventSem);
1259 if (RT_FAILURE(rc2))
1260 {
1261 AssertRC(rc2);
1262 rcRet = rc2;
1263 }
1264 }
1265 return rcRet;
1266}
1267