VirtualBox

source: vbox/trunk/src/VBox/VMM/VMReq.cpp@ 21194

Last change on this file since 21194 was 20880, checked in by vboxsync, 15 years ago

Queue suspend and power off calls from the VM for SMP guests (deadlock prevention).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.9 KB
Line 
1/* $Id: VMReq.cpp 20880 2009-06-24 08:10:25Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/mm.h>
28#include <VBox/vmm.h>
29#include "VMInternal.h"
30#include <VBox/vm.h>
31#include <VBox/uvm.h>
32
33#include <VBox/err.h>
34#include <VBox/param.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <iprt/time.h>
40#include <iprt/semaphore.h>
41#include <iprt/thread.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
48
49
50/**
51 * Allocate and queue a call request.
52 *
53 * If it's desired to poll on the completion of the request set cMillies
54 * to 0 and use VMR3ReqWait() to check for completation. In the other case
55 * use RT_INDEFINITE_WAIT.
56 * The returned request packet must be freed using VMR3ReqFree().
57 *
58 * @returns VBox status code.
59 * Will not return VERR_INTERRUPTED.
60 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
61 *
62 * @param pVM The VM handle.
63 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
64 * one of the following special values:
65 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
66 * @param ppReq Where to store the pointer to the request.
67 * This will be NULL or a valid request pointer not matter what happends.
68 * @param cMillies Number of milliseconds to wait for the request to
69 * be completed. Use RT_INDEFINITE_WAIT to only
70 * wait till it's completed.
71 * @param pfnFunction Pointer to the function to call.
72 * @param cArgs Number of arguments following in the ellipsis.
73 * Not possible to pass 64-bit arguments!
74 * @param ... Function arguments.
75 */
76VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
77{
78 va_list va;
79 va_start(va, cArgs);
80 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VBOX_STATUS, pfnFunction, cArgs, va);
81 va_end(va);
82 return rc;
83}
84
85
86/**
87 * Allocate and queue a call request to a void function.
88 *
89 * If it's desired to poll on the completion of the request set cMillies
90 * to 0 and use VMR3ReqWait() to check for completation. In the other case
91 * use RT_INDEFINITE_WAIT.
92 * The returned request packet must be freed using VMR3ReqFree().
93 *
94 * @returns VBox status code.
95 * Will not return VERR_INTERRUPTED.
96 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
97 *
98 * @param pUVM Pointer to the user mode VM structure.
99 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
100 * one of the following special values:
101 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
102 * @param ppReq Where to store the pointer to the request.
103 * This will be NULL or a valid request pointer not matter what happends.
104 * @param cMillies Number of milliseconds to wait for the request to
105 * be completed. Use RT_INDEFINITE_WAIT to only
106 * wait till it's completed.
107 * @param pfnFunction Pointer to the function to call.
108 * @param cArgs Number of arguments following in the ellipsis.
109 * Not possible to pass 64-bit arguments!
110 * @param ... Function arguments.
111 */
112VMMR3DECL(int) VMR3ReqCallVoidU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
113{
114 va_list va;
115 va_start(va, cArgs);
116 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
117 va_end(va);
118 return rc;
119}
120
121
122/**
123 * Allocate and queue a call request to a void function.
124 *
125 * If it's desired to poll on the completion of the request set cMillies
126 * to 0 and use VMR3ReqWait() to check for completation. In the other case
127 * use RT_INDEFINITE_WAIT.
128 * The returned request packet must be freed using VMR3ReqFree().
129 *
130 * @returns VBox status code.
131 * Will not return VERR_INTERRUPTED.
132 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
133 *
134 * @param pVM The VM handle.
135 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
136 * one of the following special values:
137 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
138 * @param ppReq Where to store the pointer to the request.
139 * This will be NULL or a valid request pointer not matter what happends.
140 * @param cMillies Number of milliseconds to wait for the request to
141 * be completed. Use RT_INDEFINITE_WAIT to only
142 * wait till it's completed.
143 * @param pfnFunction Pointer to the function to call.
144 * @param cArgs Number of arguments following in the ellipsis.
145 * Not possible to pass 64-bit arguments!
146 * @param ... Function arguments.
147 */
148VMMR3DECL(int) VMR3ReqCallVoid(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
149{
150 va_list va;
151 va_start(va, cArgs);
152 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
153 va_end(va);
154 return rc;
155}
156
157
158/**
159 * Allocate and queue a call request to a void function.
160 *
161 * If it's desired to poll on the completion of the request set cMillies
162 * to 0 and use VMR3ReqWait() to check for completation. In the other case
163 * use RT_INDEFINITE_WAIT.
164 * The returned request packet must be freed using VMR3ReqFree().
165 *
166 * @returns VBox status code.
167 * Will not return VERR_INTERRUPTED.
168 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
169 *
170 * @param pVM The VM handle.
171 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
172 * one of the following special values:
173 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
174 * @param ppReq Where to store the pointer to the request.
175 * This will be NULL or a valid request pointer not matter what happends, unless fFlags
176 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
177 * @param cMillies Number of milliseconds to wait for the request to
178 * be completed. Use RT_INDEFINITE_WAIT to only
179 * wait till it's completed.
180 * @param fFlags A combination of the VMREQFLAGS values.
181 * @param pfnFunction Pointer to the function to call.
182 * @param cArgs Number of arguments following in the ellipsis.
183 * Not possible to pass 64-bit arguments!
184 * @param ... Function arguments.
185 */
186VMMR3DECL(int) VMR3ReqCallEx(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
187{
188 va_list va;
189 va_start(va, cArgs);
190 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
191 va_end(va);
192 return rc;
193}
194
195
196/**
197 * Allocate and queue a call request to a void function.
198 *
199 * If it's desired to poll on the completion of the request set cMillies
200 * to 0 and use VMR3ReqWait() to check for completation. In the other case
201 * use RT_INDEFINITE_WAIT.
202 * The returned request packet must be freed using VMR3ReqFree().
203 *
204 * @returns VBox status code.
205 * Will not return VERR_INTERRUPTED.
206 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
207 *
208 * @param pUVM Pointer to the user mode VM structure.
209 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
210 * one of the following special values:
211 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
212 * @param ppReq Where to store the pointer to the request.
213 * This will be NULL or a valid request pointer not matter what happends, unless fFlags
214 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
215 * @param cMillies Number of milliseconds to wait for the request to
216 * be completed. Use RT_INDEFINITE_WAIT to only
217 * wait till it's completed.
218 * @param fFlags A combination of the VMREQFLAGS values.
219 * @param pfnFunction Pointer to the function to call.
220 * @param cArgs Number of arguments following in the ellipsis.
221 * Not possible to pass 64-bit arguments!
222 * @param ... Function arguments.
223 */
224VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
225{
226 va_list va;
227 va_start(va, cArgs);
228 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
229 va_end(va);
230 return rc;
231}
232
233
/**
 * Allocate and queue a call request - va_list worker for all the
 * VMR3ReqCall* variants.
 *
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 * The returned request packet must be freed using VMR3ReqFree().
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                              VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 * @param   ppReq           Where to store the pointer to the request.
 *                          This will be NULL or a valid request pointer no matter what happens,
 *                          unless fFlags contains VMREQFLAGS_NO_WAIT when it will be optional and
 *                          always NULL.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 * @param   fFlags          A combination of the VMREQFLAGS values.
 * @param   pfnFunction     Pointer to the function to call.
 * @param   cArgs           Number of arguments following in the ellipsis.
 *                          Stuff which differs in size from uintptr_t is gonna make trouble, so don't try!
 * @param   Args            Argument vector.
 */
VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, va_list Args)
{
    LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
    AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)), VERR_INVALID_PARAMETER);
    if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
    {
        AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
        *ppReq = NULL; /* make sure the caller never sees a stale pointer on failure */
    }
    PVMREQ pReq = NULL;
    /* Each argument occupies one uintptr_t slot; refuse calls that would overflow aArgs. */
    AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
                    ("cArg=%d\n", cArgs),
                    VERR_TOO_MUCH_DATA);

    /*
     * Allocate request
     */
    int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the request data.
     */
    pReq->fFlags = fFlags;
    pReq->u.Internal.pfn = pfnFunction;
    pReq->u.Internal.cArgs = cArgs;
    for (unsigned iArg = 0; iArg < cArgs; iArg++)
        pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);

    /*
     * Queue the request and return.
     */
    rc = VMR3ReqQueue(pReq, cMillies);
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT)
    {
        /* Queuing failed outright, so nobody will ever complete the packet - free it here.
           On VERR_TIMEOUT the packet is still in flight and must stay alive. */
        VMR3ReqFree(pReq);
        pReq = NULL;
    }
    if (!(fFlags & VMREQFLAGS_NO_WAIT))
    {
        *ppReq = pReq;
        LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
    }
    else
        LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
318
319
/**
 * Joins the list pList with whatever is linked up at *pHead.
 *
 * Lock-free insertion of a whole request list into a free-list head.
 * The strategy: atomically steal the current head, then either append the
 * stolen list behind pList's tail and reinstall, or reinstall the stolen
 * list into an empty slot; retry on races.
 */
static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Swap pList in; if the slot was empty we're done. */
        PVMREQ pHead = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, pList);
        if (!pHead)
            return;
        /* Slot wasn't empty: find the tail of the displaced list... */
        PVMREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        /* ...hook pList behind it and try to reinstall the joined list,
           which only succeeds if *ppHead still holds the pList we swapped in. */
        ASMAtomicWritePtr((void * volatile *)&pTail->pNext, pList);
        ASMCompilerBarrier();
        if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, pList))
            return;
        /* Raced: undo the link and try putting pHead back into an empty slot. */
        ASMAtomicWritePtr((void * volatile *)&pTail->pNext, NULL);
        ASMCompilerBarrier();
        if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, NULL))
            return;
        /* Both attempts lost; pList is installed, retry with pHead as the new input. */
        pList = pHead;
        /* This should converge quickly; complain loudly if we're spinning. */
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}
346
347
348/**
349 * Joins the list pList with whatever is linked up at *pHead.
350 */
351static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
352{
353 /*
354 * Split the list if it's too long.
355 */
356 unsigned cReqs = 1;
357 PVMREQ pTail = pList;
358 while (pTail->pNext)
359 {
360 if (cReqs++ > 25)
361 {
362 const uint32_t i = pVMInt->iReqFree;
363 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
364
365 pTail->pNext = NULL;
366 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
367 return;
368 }
369 pTail = pTail->pNext;
370 }
371 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
372}
373
374
375/**
376 * Allocates a request packet.
377 *
378 * The caller allocates a request packet, fills in the request data
379 * union and queues the request.
380 *
381 * @returns VBox status code.
382 *
383 * @param pVM VM handle.
384 * @param ppReq Where to store the pointer to the allocated packet.
385 * @param enmType Package type.
386 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
387 * one of the following special values:
388 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
389 */
390VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
391{
392 return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, idDstCpu);
393}
394
395
/**
 * Allocates a request packet, preferably by recycling one from the
 * lock-free free-list cache.
 *
 * The caller allocates a request packet, fills in the request data
 * union and queues the request.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   ppReq           Where to store the pointer to the allocated packet.
 * @param   enmType         Package type.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                              VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 */
VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusivly.\n",
                     enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
    AssertMsgReturn(   idDstCpu == VMCPUID_ANY
                    || idDstCpu == VMCPUID_ANY_QUEUE
                    || idDstCpu < pUVM->cCpus
                    || idDstCpu == VMCPUID_ALL
                    || idDstCpu == VMCPUID_ALL_REVERSE,
                    ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);

    /*
     * Try get a recycled packet.
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
    while (--cTries >= 0)
    {
        /* Rotate through the free-list slots so concurrent allocators spread out. */
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
#if 0 /* sad, but this won't work safely because the reading of pReq->pNext. */
        PVMREQ pNext = NULL;
        PVMREQ pReq = *ppHead;
        if (    pReq
            &&  !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq)
            &&  (pReq = *ppHead)
            &&  !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq))
            pReq = NULL;
        if (pReq)
        {
            Assert(pReq->pNext == pNext); NOREF(pReq);
#else
        /* Steal the whole list atomically, keep the first packet... */
        PVMREQ pReq = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, NULL);
        if (pReq)
        {
            /* ...and try to put the remainder straight back; on a race, rejoin it
               via the slower vmr3ReqJoinFree path. */
            PVMREQ pNext = pReq->pNext;
            if (    pNext
                &&  !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, pNext, NULL))
            {
                STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
                vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
            }
#endif
            ASMAtomicDecU32(&pUVM->vm.s.cReqFree);

            /*
             * Make sure the event sem is not signaled.
             */
            if (!pReq->fEventSemClear)
            {
                /* A zero-timeout wait consumes any stale signal left from the previous use. */
                int rc = RTSemEventWait(pReq->EventSem, 0);
                if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
                {
                    /*
                     * This shall not happen, but if it does we'll just destroy
                     * the semaphore and create a new one.
                     */
                    AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
                    RTSemEventDestroy(pReq->EventSem);
                    rc = RTSemEventCreate(&pReq->EventSem);
                    AssertRC(rc);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                pReq->fEventSemClear = true;
            }
            else
                Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);

            /*
             * Initialize the packet and return it.
             */
            Assert(pReq->enmType == VMREQTYPE_INVALID);
            Assert(pReq->enmState == VMREQSTATE_FREE);
            Assert(pReq->pUVM == pUVM);
            ASMAtomicXchgSize(&pReq->pNext, NULL);
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
            pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
            pReq->enmType = enmType;
            pReq->idDstCpu = idDstCpu;

            *ppReq = pReq;
            STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
            LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
            return VINF_SUCCESS;
        }
    }

    /*
     * Ok allocate one.
     */
    PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
    if (!pReq)
        return VERR_NO_MEMORY;

    /*
     * Create the semaphore.
     */
    int rc = RTSemEventCreate(&pReq->EventSem);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        MMR3HeapFree(pReq);
        return rc;
    }

    /*
     * Initialize the packet and return it.
     */
    pReq->pNext = NULL;
    pReq->pUVM = pUVM;
    pReq->enmState = VMREQSTATE_ALLOCATED;
    pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
    pReq->fEventSemClear = true;
    pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
    pReq->enmType = enmType;
    pReq->idDstCpu = idDstCpu;

    *ppReq = pReq;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
    LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
    return VINF_SUCCESS;
}
541
542
/**
 * Free a request packet, recycling it into the free-list cache when
 * there is room and destroying it otherwise.
 *
 * @returns VBox status code.
 *
 * @param   pReq            Package to free.
 * @remark  The request packet must be in allocated or completed state!
 */
VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
{
    /*
     * Ignore NULL (all free functions should do this imho).
     */
    if (!pReq)
        return VINF_SUCCESS;

    /*
     * Check packet state.
     */
    switch (pReq->enmState)
    {
        case VMREQSTATE_ALLOCATED:
        case VMREQSTATE_COMPLETED:
            break;
        default:
            AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
            return VERR_VM_REQUEST_STATE;
    }

    /*
     * Make it a free packet and put it into one of the free packet lists.
     */
    pReq->enmState = VMREQSTATE_FREE;
    pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
    pReq->enmType = VMREQTYPE_INVALID;

    PUVM pUVM = pReq->pUVM;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);

    /* Cache up to 128 packets for reuse; beyond that really free them. */
    if (pUVM->vm.s.cReqFree < 128)
    {
        ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
        PVMREQ pNext;
        /* Classic lock-free push: link to the current head, then CAS it in. */
        do
        {
            pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)ppHead);
            ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pReq, (void *)pNext));
    }
    else
    {
        /* Cache full: destroy the semaphore and release the heap memory. */
        STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
        RTSemEventDestroy(pReq->EventSem);
        MMR3HeapFree(pReq);
    }
    return VINF_SUCCESS;
}
602
603
/**
 * Queue a request.
 *
 * The request must be allocated using VMR3ReqAlloc() and contain
 * all the required data.
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to queue.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, unsigned cMillies)
{
    LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
    AssertMsgReturn(   VALID_PTR(pReq->pUVM)
                    && !pReq->pNext
                    && pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(   pReq->enmType > VMREQTYPE_INVALID
                    && pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)));

    /*
     * Are we the EMT or not?
     * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
     */
    int rc = VINF_SUCCESS;
    PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM;                 /* volatile paranoia */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);      /* non-NULL only on EMT threads */

    if (pReq->idDstCpu == VMCPUID_ALL)
    {
        /* One-by-one: the same packet is re-queued to each CPU in ascending order. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (unsigned i = 0; i < pUVM->cCpus; i++)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
    {
        /* One-by-one, descending CPU order. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (int i = pUVM->cCpus-1; i >= 0; i--)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (   pReq->idDstCpu != VMCPUID_ANY   /* for a specific VMCPU? */
             && pReq->idDstCpu != VMCPUID_ANY_QUEUE
             && (   !pUVCpu                     /* and it's not the current thread. */
                 || pUVCpu->idCpu != pReq->idDstCpu))
    {
        VMCPUID idTarget = pReq->idDstCpu;      Assert(idTarget < pUVM->cCpus);
        PVMCPU pVCpu = &pUVM->pVM->aCpus[idTarget];
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;     /* volatile paranoia */

        /* Fetch the right UVMCPU */
        pUVCpu = &pUVM->aCpus[idTarget];

        /*
         * Insert it.  Lock-free push onto the target CPU's request list;
         * pReq may be completed and freed by the EMT the instant the CAS succeeds.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)&pUVCpu->vm.s.pReqs);
            ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVCpu->vm.s.pReqs, (void *)pReq, (void *)pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
        VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else if (   (   pReq->idDstCpu == VMCPUID_ANY
                 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
             || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
    {
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;     /* volatile paranoia */

        Assert(pReq->idDstCpu != VMCPUID_ANY_QUEUE || pUVCpu);

        /*
         * Insert it.  Same lock-free push, but onto the global request list.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)&pUVM->vm.s.pReqs);
            ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->vm.s.pReqs, (void *)pReq, (void *)pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
        VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else
    {
        Assert(pUVCpu);

        /*
         * The requester was an EMT, just execute it.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        rc = vmR3ReqProcessOneU(pUVM, pReq);
        LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
    }
    return rc;
}
762
763
/**
 * Wait for a request to be completed.
 *
 * @returns VBox status code.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to wait for.
 * @param   cMillies        Number of milliseconds to wait.
 *                          Use RT_INDEFINITE_WAIT to only wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, unsigned cMillies)
{
    LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));

    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(   pReq->enmState == VMREQSTATE_QUEUED
                    || pReq->enmState == VMREQSTATE_PROCESSING
                    || pReq->enmState == VMREQSTATE_COMPLETED,
                    ("Invalid state %d\n", pReq->enmState),
                    VERR_VM_REQUEST_STATE);
    AssertMsgReturn(   VALID_PTR(pReq->pUVM)
                    && pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(   pReq->enmType > VMREQTYPE_INVALID
                    && pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);

    /*
     * Check for deadlock condition
     */
    PUVM pUVM = pReq->pUVM;
    NOREF(pUVM);

    /*
     * Wait on the package.
     */
    int rc;
    if (cMillies != RT_INDEFINITE_WAIT)
        rc = RTSemEventWait(pReq->EventSem, cMillies);
    else
    {
        /* Re-wait until the request actually reaches a terminal state,
           guarding against premature semaphore wakeups. */
        do
        {
            rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
            Assert(rc != VERR_TIMEOUT);
        } while (   pReq->enmState != VMREQSTATE_COMPLETED
                 && pReq->enmState != VMREQSTATE_INVALID);
    }
    if (RT_SUCCESS(rc))
        ASMAtomicXchgSize(&pReq->fEventSemClear, true);  /* we consumed the signal */
    if (pReq->enmState == VMREQSTATE_COMPLETED)
        rc = VINF_SUCCESS;
    LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
825
826
/**
 * Process pending request(s).
 *
 * This function is called from a forced action handler in the EMT
 * or from one of the EMT loops.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        Pass VMCPUID_ANY to process the common request queue
 *                          and the CPU ID for a CPU specific one. In the latter
 *                          case the calling thread must be the EMT of that CPU.
 *
 * @note    SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
 */
VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu)
{
    LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));

    /*
     * Process loop.
     *
     * We do not repeat the outer loop if we've got an informational status code
     * since that code needs processing by our caller.
     */
    int rc = VINF_SUCCESS;
    while (rc <= VINF_SUCCESS)
    {
        /*
         * Get pending requests.  Select the queue (global vs per-CPU) and clear
         * the corresponding forced-action flag before stealing the list.
         */
        void * volatile *ppReqs;
        if (idDstCpu == VMCPUID_ANY)
        {
            ppReqs = (void * volatile *)&pUVM->vm.s.pReqs;
            if (RT_LIKELY(pUVM->pVM))
                VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
        }
        else
        {
            Assert(idDstCpu < pUVM->cCpus);
            Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
            ppReqs = (void * volatile *)&pUVM->aCpus[idDstCpu].vm.s.pReqs;
            if (RT_LIKELY(pUVM->pVM))
                VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
        }
        /* Atomically steal the whole list; concurrent producers start a new one. */
        PVMREQ pReqs = (PVMREQ)ASMAtomicXchgPtr(ppReqs, NULL);
        if (!pReqs)
            break;

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the stolen list is LIFO.)
         */
        PVMREQ pReq = pReqs;
        if (pReq->pNext)
            Log2(("VMR3ReqProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
        pReqs = NULL;
        while (pReq)
        {
            Assert(pReq->enmState == VMREQSTATE_QUEUED);
            Assert(pReq->pUVM == pUVM);
            PVMREQ pCur = pReq;
            pReq = pReq->pNext;
            pCur->pNext = pReqs;
            pReqs = pCur;
        }

        /*
         * Process the requests.
         *
         * Since this is a FF worker certain rules applies to the
         * status codes. See the EM section in VBox/err.h and EM.cpp for details.
         */
        while (pReqs)
        {
            /* Unchain the first request and advance the list. */
            pReq = pReqs;
            pReqs = pReqs->pNext;
            pReq->pNext = NULL;

            /* Process the request */
            int rc2 = vmR3ReqProcessOneU(pUVM, pReq);

            /*
             * The status code handling is extremely important yet very fragile. Should probably
             * look for a better way of communicating status changes to EM...
             * Keep the lowest VINF_EM_* code seen, as lower codes have higher priority.
             */
            if (    rc2 >= VINF_EM_FIRST
                &&  rc2 <= VINF_EM_LAST
                &&  (   rc == VINF_SUCCESS
                     || rc2 < rc) )
                rc = rc2;
        }
    }

    LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
    return rc;
}
926
927
/**
 * Process one request.
 *
 * Executes the packed-down call frame carried by the request, records the
 * callee's status in the packet, and then either frees the packet (no-wait
 * requests) or signals the waiting thread.
 *
 * @returns VBox status code. For VMREQFLAGS_VOID requests the callee's
 *          return value is discarded and VINF_SUCCESS is returned (unless
 *          signalling the waiter fails, in which case that failure is
 *          returned instead).
 *
 * @param   pUVM    Pointer to the user mode VM structure. Currently not
 *                  referenced by the body; kept for interface symmetry with
 *                  the other request workers.
 * @param   pReq    Request packet to process. Must be in the QUEUED state;
 *                  ownership semantics depend on VMREQFLAGS_NO_WAIT (see
 *                  the completion section below).
 */
static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq)
{
    LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));

    /*
     * Process the request.
     */
    Assert(pReq->enmState == VMREQSTATE_QUEUED);
    pReq->enmState = VMREQSTATE_PROCESSING;
    int rcRet = VINF_SUCCESS;           /* the return code of this function. */
    int rcReq = VERR_NOT_IMPLEMENTED;   /* the request status. */
    switch (pReq->enmType)
    {
        /*
         * A packed down call frame: an arbitrary function pointer plus up to
         * twelve uintptr_t arguments stored in the request packet.
         */
        case VMREQTYPE_INTERNAL:
        {
            uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
            /* Union used to view the stored PFNRT as a callback taking the
               exact number of uintptr_t arguments recorded in the packet. */
            union
            {
                PFNRT pfn;
                DECLCALLBACKMEMBER(int, pfn00)(void);
                DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
                DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
            } u;
            u.pfn = pReq->u.Internal.pfn;
#ifdef RT_ARCH_AMD64
            /* AMD64 passes leading arguments in registers, so the call must
               go through a prototype matching the exact argument count. */
            switch (pReq->u.Internal.cArgs)
            {
                case 0: rcRet = u.pfn00(); break;
                case 1: rcRet = u.pfn01(pauArgs[0]); break;
                case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
                case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
                case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
                case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
                case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
                case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
                case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
                case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
                case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
                case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
                case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
                default:
                    AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
                    rcRet = rcReq = VERR_INTERNAL_ERROR;
                    break;
            }
#else /* x86: */
            /* On x86 all arguments go on the stack (presumably cdecl — the
               stored prototypes take only uintptr_t args), so we can copy the
               argument array onto the stack as a blob and call the function. */
            size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
# ifdef __GNUC__
            /* Trampoline: save esp in edx; reserve cbArgs bytes and align esp
               down to 16 bytes; rep movsl copies the argument array (esi) to
               the new stack top (edi); call the function (eax); restore esp
               from edi (loaded from edx before the call). eax carries the
               callee's return value out as rcRet. */
            __asm__ __volatile__("movl  %%esp, %%edx\n\t"
                                 "subl  %2, %%esp\n\t"
                                 "andl  $0xfffffff0, %%esp\n\t"
                                 "shrl  $2, %2\n\t"
                                 "movl  %%esp, %%edi\n\t"
                                 "rep movsl\n\t"
                                 "movl  %%edx, %%edi\n\t"
                                 "call  *%%eax\n\t"
                                 "mov   %%edi, %%esp\n\t"
                                 : "=a" (rcRet),
                                   "=S" (pauArgs),
                                   "=c" (cbArgs)
                                 : "0" (u.pfn),
                                   "1" (pauArgs),
                                   "2" (cbArgs)
                                 : "edi", "edx");
# else
            /* MSC variant of the same trampoline; ebx holds the saved esp. */
            __asm
            {
                xor     edx, edx        /* just mess it up. */
                mov     eax, u.pfn
                mov     ecx, cbArgs
                shr     ecx, 2
                mov     esi, pauArgs
                mov     ebx, esp
                sub     esp, cbArgs
                and     esp, 0xfffffff0
                mov     edi, esp
                rep movsd
                call    eax
                mov     esp, ebx
                mov     rcRet, eax
            }
# endif
#endif /* x86 */
            /* VOID requests discard the callee's (nonexistent) return value. */
            if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
                rcRet = VINF_SUCCESS;
            rcReq = rcRet;
            break;
        }

        default:
            AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
            rcReq = VERR_NOT_IMPLEMENTED;
            break;
    }

    /*
     * Complete the request.
     */
    pReq->iStatus = rcReq;
    pReq->enmState = VMREQSTATE_COMPLETED;
    if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
    {
        /* Free the packet, nobody is waiting. */
        LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
                 pReq, rcReq, rcRet));
        VMR3ReqFree(pReq);
    }
    else
    {
        /* Notify the waiter; it owns the packet from here on and will free
           it, so pReq must not be touched once the semaphore is signalled. */
        LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
                 pReq, rcReq, rcRet));
        ASMAtomicXchgSize(&pReq->fEventSemClear, false);
        int rc2 = RTSemEventSignal(pReq->EventSem);
        if (RT_FAILURE(rc2))
        {
            AssertRC(rc2);
            rcRet = rc2;
        }
    }
    return rcRet;
}
1071
1072
1073
1074
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette