VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMReq.cpp@ 80191

Last change on this file since 80191 was 80191, checked in by vboxsync, 5 years ago

VMM/r3: Refactored VMCPU enumeration in preparation that aCpus will be replaced with a pointer array. Removed two raw-mode offset members from the CPUM and CPUMCPU sub-structures. bugref:9217 bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 50.0 KB
Line 
1/* $Id: VMReq.cpp 80191 2019-08-08 00:36:57Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_VM
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/vmm.h>
26#include "VMInternal.h"
27#include <VBox/vmm/vm.h>
28#include <VBox/vmm/uvm.h>
29
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <VBox/log.h>
33#include <iprt/assert.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/time.h>
37#include <iprt/semaphore.h>
38#include <iprt/thread.h>
39
40
41/*********************************************************************************************************************************
42* Internal Functions *
43*********************************************************************************************************************************/
44static int vmR3ReqProcessOne(PVMREQ pReq);
45
46
47/**
48 * Convenience wrapper for VMR3ReqCallU.
49 *
 * This assumes (1) you're calling a function that returns a VBox status code,
 * (2) that you want its return code on success, and (3) that you wish to wait
 * forever for it to return.
53 *
54 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
55 * its status code is return. Otherwise, the status of pfnFunction is
56 * returned.
57 *
58 * @param pVM The cross context VM structure.
59 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
60 * one of the following special values:
61 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
62 * @param pfnFunction Pointer to the function to call.
63 * @param cArgs Number of arguments following in the ellipsis.
64 * @param ... Function arguments.
65 *
66 * @remarks See remarks on VMR3ReqCallVU.
67 * @internal
68 */
69VMMR3_INT_DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
70{
71 PVMREQ pReq;
72 va_list va;
73 va_start(va, cArgs);
74 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
75 pfnFunction, cArgs, va);
76 va_end(va);
77 if (RT_SUCCESS(rc))
78 rc = pReq->iStatus;
79 VMR3ReqFree(pReq);
80 return rc;
81}
82
83
84/**
85 * Convenience wrapper for VMR3ReqCallU.
86 *
87 * This assumes (1) you're calling a function that returns an VBox status code,
88 * (2) that you want it's return code on success, and (3) that you wish to wait
89 * for ever for it to return.
90 *
91 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
92 * its status code is return. Otherwise, the status of pfnFunction is
93 * returned.
94 *
95 * @param pUVM The user mode VM structure.
96 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
97 * one of the following special values:
98 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
99 * @param pfnFunction Pointer to the function to call.
100 * @param cArgs Number of arguments following in the ellipsis.
101 * @param ... Function arguments.
102 *
103 * @remarks See remarks on VMR3ReqCallVU.
104 * @internal
105 */
106VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
107{
108 PVMREQ pReq;
109 va_list va;
110 va_start(va, cArgs);
111 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
112 pfnFunction, cArgs, va);
113 va_end(va);
114 if (RT_SUCCESS(rc))
115 rc = pReq->iStatus;
116 VMR3ReqFree(pReq);
117 return rc;
118}
119
120
121/**
122 * Convenience wrapper for VMR3ReqCallU.
123 *
124 * This assumes (1) you're calling a function that returns an VBox status code
125 * and that you do not wish to wait for it to complete.
126 *
127 * @returns VBox status code returned by VMR3ReqCallVU.
128 *
129 * @param pVM The cross context VM structure.
130 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
131 * one of the following special values:
132 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
133 * @param pfnFunction Pointer to the function to call.
134 * @param cArgs Number of arguments following in the ellipsis.
135 * @param ... Function arguments.
136 *
137 * @remarks See remarks on VMR3ReqCallVU.
138 * @internal
139 */
140VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
141{
142 va_list va;
143 va_start(va, cArgs);
144 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
145 pfnFunction, cArgs, va);
146 va_end(va);
147 return rc;
148}
149
150
151/**
152 * Convenience wrapper for VMR3ReqCallU.
153 *
154 * This assumes (1) you're calling a function that returns an VBox status code
155 * and that you do not wish to wait for it to complete.
156 *
157 * @returns VBox status code returned by VMR3ReqCallVU.
158 *
159 * @param pUVM Pointer to the VM.
160 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
161 * one of the following special values:
162 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
163 * @param pfnFunction Pointer to the function to call.
164 * @param cArgs Number of arguments following in the ellipsis.
165 * @param ... Function arguments.
166 *
167 * @remarks See remarks on VMR3ReqCallVU.
168 */
169VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
170{
171 va_list va;
172 va_start(va, cArgs);
173 int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
174 pfnFunction, cArgs, va);
175 va_end(va);
176 return rc;
177}
178
179
180/**
181 * Convenience wrapper for VMR3ReqCallU.
182 *
183 * This assumes (1) you're calling a function that returns void, and (2) that
184 * you wish to wait for ever for it to return.
185 *
186 * @returns VBox status code of VMR3ReqCallVU.
187 *
188 * @param pVM The cross context VM structure.
189 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
190 * one of the following special values:
191 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
192 * @param pfnFunction Pointer to the function to call.
193 * @param cArgs Number of arguments following in the ellipsis.
194 * @param ... Function arguments.
195 *
196 * @remarks See remarks on VMR3ReqCallVU.
197 * @internal
198 */
199VMMR3_INT_DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
200{
201 PVMREQ pReq;
202 va_list va;
203 va_start(va, cArgs);
204 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
205 pfnFunction, cArgs, va);
206 va_end(va);
207 VMR3ReqFree(pReq);
208 return rc;
209}
210
211
212/**
213 * Convenience wrapper for VMR3ReqCallU.
214 *
215 * This assumes (1) you're calling a function that returns void, and (2) that
216 * you wish to wait for ever for it to return.
217 *
218 * @returns VBox status code of VMR3ReqCallVU.
219 *
220 * @param pUVM Pointer to the VM.
221 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
222 * one of the following special values:
223 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
224 * @param pfnFunction Pointer to the function to call.
225 * @param cArgs Number of arguments following in the ellipsis.
226 * @param ... Function arguments.
227 *
228 * @remarks See remarks on VMR3ReqCallVU.
229 */
230VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
231{
232 PVMREQ pReq;
233 va_list va;
234 va_start(va, cArgs);
235 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
236 pfnFunction, cArgs, va);
237 va_end(va);
238 VMR3ReqFree(pReq);
239 return rc;
240}
241
242
243/**
244 * Convenience wrapper for VMR3ReqCallU.
245 *
246 * This assumes (1) you're calling a function that returns void, and (2) that
247 * you do not wish to wait for it to complete.
248 *
249 * @returns VBox status code of VMR3ReqCallVU.
250 *
251 * @param pVM The cross context VM structure.
252 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
253 * one of the following special values:
254 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
255 * @param pfnFunction Pointer to the function to call.
256 * @param cArgs Number of arguments following in the ellipsis.
257 * @param ... Function arguments.
258 *
259 * @remarks See remarks on VMR3ReqCallVU.
260 * @internal
261 */
262VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
263{
264 PVMREQ pReq;
265 va_list va;
266 va_start(va, cArgs);
267 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
268 pfnFunction, cArgs, va);
269 va_end(va);
270 VMR3ReqFree(pReq);
271 return rc;
272}
273
274
275/**
276 * Convenience wrapper for VMR3ReqCallU.
277 *
278 * This assumes (1) you're calling a function that returns an VBox status code,
279 * (2) that you want it's return code on success, (3) that you wish to wait for
280 * ever for it to return, and (4) that it's priority request that can be safely
281 * be handled during async suspend and power off.
282 *
283 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
284 * its status code is return. Otherwise, the status of pfnFunction is
285 * returned.
286 *
287 * @param pVM The cross context VM structure.
288 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
289 * one of the following special values:
290 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
291 * @param pfnFunction Pointer to the function to call.
292 * @param cArgs Number of arguments following in the ellipsis.
293 * @param ... Function arguments.
294 *
295 * @remarks See remarks on VMR3ReqCallVU.
296 * @internal
297 */
298VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
299{
300 PVMREQ pReq;
301 va_list va;
302 va_start(va, cArgs);
303 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
304 pfnFunction, cArgs, va);
305 va_end(va);
306 if (RT_SUCCESS(rc))
307 rc = pReq->iStatus;
308 VMR3ReqFree(pReq);
309 return rc;
310}
311
312
313/**
314 * Convenience wrapper for VMR3ReqCallU.
315 *
316 * This assumes (1) you're calling a function that returns an VBox status code,
317 * (2) that you want it's return code on success, (3) that you wish to wait for
318 * ever for it to return, and (4) that it's priority request that can be safely
319 * be handled during async suspend and power off.
320 *
321 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
322 * its status code is return. Otherwise, the status of pfnFunction is
323 * returned.
324 *
325 * @param pUVM The user mode VM handle.
326 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
327 * one of the following special values:
328 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
329 * @param pfnFunction Pointer to the function to call.
330 * @param cArgs Number of arguments following in the ellipsis.
331 * @param ... Function arguments.
332 *
333 * @remarks See remarks on VMR3ReqCallVU.
334 */
335VMMR3DECL(int) VMR3ReqPriorityCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
336{
337 PVMREQ pReq;
338 va_list va;
339 va_start(va, cArgs);
340 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
341 pfnFunction, cArgs, va);
342 va_end(va);
343 if (RT_SUCCESS(rc))
344 rc = pReq->iStatus;
345 VMR3ReqFree(pReq);
346 return rc;
347}
348
349
350/**
351 * Convenience wrapper for VMR3ReqCallU.
352 *
353 * This assumes (1) you're calling a function that returns void, (2) that you
354 * wish to wait for ever for it to return, and (3) that it's priority request
355 * that can be safely be handled during async suspend and power off.
356 *
357 * @returns VBox status code of VMR3ReqCallVU.
358 *
359 * @param pUVM The user mode VM handle.
360 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
361 * one of the following special values:
362 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
363 * @param pfnFunction Pointer to the function to call.
364 * @param cArgs Number of arguments following in the ellipsis.
365 * @param ... Function arguments.
366 *
367 * @remarks See remarks on VMR3ReqCallVU.
368 */
369VMMR3DECL(int) VMR3ReqPriorityCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
370{
371 PVMREQ pReq;
372 va_list va;
373 va_start(va, cArgs);
374 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
375 pfnFunction, cArgs, va);
376 va_end(va);
377 VMR3ReqFree(pReq);
378 return rc;
379}
380
381
382/**
 * Allocate and queue a call request.
384 *
385 * If it's desired to poll on the completion of the request set cMillies
386 * to 0 and use VMR3ReqWait() to check for completion. In the other case
387 * use RT_INDEFINITE_WAIT.
388 * The returned request packet must be freed using VMR3ReqFree().
389 *
390 * @returns VBox status code.
391 * Will not return VERR_INTERRUPTED.
392 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
393 *
394 * @param pUVM Pointer to the user mode VM structure.
395 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
396 * one of the following special values:
397 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
398 * @param ppReq Where to store the pointer to the request.
 *                          This will be NULL or a valid request pointer no matter what happens, unless fFlags
400 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
401 * @param cMillies Number of milliseconds to wait for the request to
402 * be completed. Use RT_INDEFINITE_WAIT to only
403 * wait till it's completed.
404 * @param fFlags A combination of the VMREQFLAGS values.
405 * @param pfnFunction Pointer to the function to call.
406 * @param cArgs Number of arguments following in the ellipsis.
407 * @param ... Function arguments.
408 *
409 * @remarks See remarks on VMR3ReqCallVU.
410 */
411VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
412 PFNRT pfnFunction, unsigned cArgs, ...)
413{
414 va_list va;
415 va_start(va, cArgs);
416 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
417 va_end(va);
418 return rc;
419}
420
421
422/**
423 * Allocate and queue a call request.
424 *
425 * If it's desired to poll on the completion of the request set cMillies
426 * to 0 and use VMR3ReqWait() to check for completion. In the other case
427 * use RT_INDEFINITE_WAIT.
428 * The returned request packet must be freed using VMR3ReqFree().
429 *
430 * @returns VBox status code.
431 * Will not return VERR_INTERRUPTED.
432 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
433 *
434 * @param pUVM Pointer to the user mode VM structure.
435 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
436 * one of the following special values:
437 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
438 * @param ppReq Where to store the pointer to the request.
 *                          This will be NULL or a valid request pointer no matter what happens, unless fFlags
440 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
441 * @param cMillies Number of milliseconds to wait for the request to
442 * be completed. Use RT_INDEFINITE_WAIT to only
443 * wait till it's completed.
444 * @param pfnFunction Pointer to the function to call.
445 * @param fFlags A combination of the VMREQFLAGS values.
446 * @param cArgs Number of arguments following in the ellipsis.
447 * Stuff which differs in size from uintptr_t is gonna make trouble, so don't try!
448 * @param Args Argument vector.
449 *
450 * @remarks Caveats:
451 * - Do not pass anything which is larger than an uintptr_t.
452 * - 64-bit integers are larger than uintptr_t on 32-bit hosts.
453 * Pass integers > 32-bit by reference (pointers).
454 * - Don't use NULL since it should be the integer 0 in C++ and may
455 * therefore end up with garbage in the bits 63:32 on 64-bit
456 * hosts because 'int' is 32-bit.
457 * Use (void *)NULL or (uintptr_t)0 instead of NULL.
458 */
VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
                             PFNRT pfnFunction, unsigned cArgs, va_list Args)
{
    LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)), VERR_INVALID_PARAMETER);
    if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
    {
        /* ppReq is mandatory unless the caller isn't waiting; clear it early
           so it is NULL on every failure path below. */
        AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
        *ppReq = NULL;
    }
    PVMREQ pReq = NULL;
    /* Each argument occupies one uintptr_t slot; reject calls that would
       overflow the fixed aArgs array.  (sizeof on the NULL pointer is fine,
       it only inspects the type.) */
    AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
                    ("cArg=%d\n", cArgs),
                    VERR_TOO_MUCH_DATA);

    /*
     * Allocate request
     */
    int rc = VMR3ReqAlloc(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the request data.
     */
    pReq->fFlags = fFlags;
    pReq->u.Internal.pfn = pfnFunction;
    pReq->u.Internal.cArgs = cArgs;
    for (unsigned iArg = 0; iArg < cArgs; iArg++)
        pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);

    /*
     * Queue the request and return.
     */
    rc = VMR3ReqQueue(pReq, cMillies);
    /* On VERR_TIMEOUT the request is still queued and may complete later, so
       it must not be freed here; any other failure means we still own it. */
    if (    RT_FAILURE(rc)
        && rc != VERR_TIMEOUT)
    {
        VMR3ReqFree(pReq);
        pReq = NULL;
    }
    if (!(fFlags & VMREQFLAGS_NO_WAIT))
    {
        *ppReq = pReq;
        LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
    }
    else
        LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
516
517
518/**
519 * Joins the list pList with whatever is linked up at *pHead.
520 */
static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Swap pList in as the new head.  If the slot was empty we're done;
           otherwise we now temporarily own the old chain (pHead). */
        PVMREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PVMREQ);
        if (!pHead)
            return;
        /* Append pList to the tail of the chain we took out... */
        PVMREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        ASMAtomicWritePtr(&pTail->pNext, pList);
        ASMCompilerBarrier();
        /* ...and try to publish the combined chain, expecting the slot to
           still contain the pList we installed above. */
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
            return;
        /* Raced: someone changed the slot, so pList may already have a new
           owner.  Detach it from our chain and retry against an empty slot. */
        ASMAtomicWriteNullPtr(&pTail->pNext);
        ASMCompilerBarrier();
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
            return;
        /* Still racing; start over, now inserting the chain we hold. */
        pList = pHead;
        /* These should only fire under pathological contention. */
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}
544
545
546/**
547 * Joins the list pList with whatever is linked up at *pHead.
548 */
549static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
550{
551 /*
552 * Split the list if it's too long.
553 */
554 unsigned cReqs = 1;
555 PVMREQ pTail = pList;
556 while (pTail->pNext)
557 {
558 if (cReqs++ > 25)
559 {
560 const uint32_t i = pVMInt->iReqFree;
561 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
562
563 pTail->pNext = NULL;
564 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
565 return;
566 }
567 pTail = pTail->pNext;
568 }
569 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
570}
571
572
573/**
574 * Allocates a request packet.
575 *
576 * The caller allocates a request packet, fills in the request data
577 * union and queues the request.
578 *
579 * @returns VBox status code.
580 *
581 * @param pUVM Pointer to the user mode VM structure.
582 * @param ppReq Where to store the pointer to the allocated packet.
583 * @param enmType Package type.
584 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
585 * one of the following special values:
586 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
587 */
VMMR3DECL(int) VMR3ReqAlloc(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively.\n",
                     enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
    AssertMsgReturn(   idDstCpu == VMCPUID_ANY
                    || idDstCpu == VMCPUID_ANY_QUEUE
                    || idDstCpu < pUVM->cCpus
                    || idDstCpu == VMCPUID_ALL
                    || idDstCpu == VMCPUID_ALL_REVERSE,
                    ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);

    /*
     * Try get a recycled packet.
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
    while (--cTries >= 0)
    {
        /* Pick a free-list slot round-robin style and take the whole chain out. */
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
#if 0 /* sad, but this won't work safely because the reading of pReq->pNext. */
        PVMREQ pNext = NULL;
        PVMREQ pReq = *ppHead;
        if (    pReq
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq)
            &&  (pReq = *ppHead)
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq))
            pReq = NULL;
        if (pReq)
        {
            Assert(pReq->pNext == pNext); NOREF(pReq);
#else
        PVMREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PVMREQ);
        if (pReq)
        {
            /* We only keep the first packet; try to put the remainder back
               into the (hopefully still empty) slot, else re-join it. */
            PVMREQ pNext = pReq->pNext;
            if (    pNext
                &&  !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
            {
                STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
                vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
            }
#endif
            ASMAtomicDecU32(&pUVM->vm.s.cReqFree);

            /*
             * Make sure the event sem is not signaled.
             */
            if (!pReq->fEventSemClear)
            {
                /* A zero-timeout wait consumes a pending signal, if any. */
                int rc = RTSemEventWait(pReq->EventSem, 0);
                if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
                {
                    /*
                     * This shall not happen, but if it does we'll just destroy
                     * the semaphore and create a new one.
                     */
                    AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
                    RTSemEventDestroy(pReq->EventSem);
                    rc = RTSemEventCreate(&pReq->EventSem);
                    AssertRC(rc);
                    if (RT_FAILURE(rc))
                        return rc;
#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
                    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
                        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif
                }
                pReq->fEventSemClear = true;
            }
            else
                Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);

            /*
             * Initialize the packet and return it.
             */
            Assert(pReq->enmType == VMREQTYPE_INVALID);
            Assert(pReq->enmState == VMREQSTATE_FREE);
            Assert(pReq->pUVM == pUVM);
            ASMAtomicWriteNullPtr(&pReq->pNext);
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
            pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
            pReq->enmType = enmType;
            pReq->idDstCpu = idDstCpu;

            *ppReq = pReq;
            STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
            LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
            return VINF_SUCCESS;
        }
    }

    /*
     * Ok allocate one.
     */
    PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
    if (!pReq)
        return VERR_NO_MEMORY;

    /*
     * Create the semaphore.
     */
    int rc = RTSemEventCreate(&pReq->EventSem);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        MMR3HeapFree(pReq);
        return rc;
    }
#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif

    /*
     * Initialize the packet and return it.
     */
    pReq->pNext = NULL;
    pReq->pUVM = pUVM;
    pReq->enmState = VMREQSTATE_ALLOCATED;
    pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
    pReq->fEventSemClear = true;
    pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
    pReq->enmType = enmType;
    pReq->idDstCpu = idDstCpu;

    *ppReq = pReq;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
    LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
    return VINF_SUCCESS;
}
726
727
728/**
729 * Free a request packet.
730 *
731 * @returns VBox status code.
732 *
733 * @param pReq Package to free.
734 * @remark The request packet must be in allocated or completed state!
735 */
736VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
737{
738 /*
739 * Ignore NULL (all free functions should do this imho).
740 */
741 if (!pReq)
742 return VINF_SUCCESS;
743
744 /*
745 * Check packet state.
746 */
747 switch (pReq->enmState)
748 {
749 case VMREQSTATE_ALLOCATED:
750 case VMREQSTATE_COMPLETED:
751 break;
752 default:
753 AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
754 return VERR_VM_REQUEST_STATE;
755 }
756
757 /*
758 * Make it a free packet and put it into one of the free packet lists.
759 */
760 pReq->enmState = VMREQSTATE_FREE;
761 pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
762 pReq->enmType = VMREQTYPE_INVALID;
763
764 PUVM pUVM = pReq->pUVM;
765 STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
766
767 if (pUVM->vm.s.cReqFree < 128)
768 {
769 ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
770 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
771 PVMREQ pNext;
772 do
773 {
774 pNext = ASMAtomicUoReadPtrT(ppHead, PVMREQ);
775 ASMAtomicWritePtr(&pReq->pNext, pNext);
776 ASMCompilerBarrier();
777 } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));
778 }
779 else
780 {
781 STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
782 RTSemEventDestroy(pReq->EventSem);
783 MMR3HeapFree(pReq);
784 }
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Queue a request.
791 *
792 * The quest must be allocated using VMR3ReqAlloc() and contain
793 * all the required data.
794 * If it's desired to poll on the completion of the request set cMillies
795 * to 0 and use VMR3ReqWait() to check for completion. In the other case
796 * use RT_INDEFINITE_WAIT.
797 *
798 * @returns VBox status code.
799 * Will not return VERR_INTERRUPTED.
800 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
801 *
802 * @param pReq The request to queue.
803 * @param cMillies Number of milliseconds to wait for the request to
804 * be completed. Use RT_INDEFINITE_WAIT to only
805 * wait till it's completed.
806 */
VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, RTMSINTERVAL cMillies)
{
    LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
    AssertMsgReturn(   VALID_PTR(pReq->pUVM)
                    && !pReq->pNext
                    && pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(   pReq->enmType > VMREQTYPE_INVALID
                    && pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)));

    /*
     * Are we the EMT or not?
     * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
     */
    int rc = VINF_SUCCESS;
    PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM; /* volatile paranoia */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (pReq->idDstCpu == VMCPUID_ALL)
    {
        /* Broadcast: recurse for each VCPU in turn, reusing the same packet.
           Waiting is implied, since the packet is reused sequentially. */
        /* One-by-one. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (unsigned i = 0; i < pUVM->cCpus; i++)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
    {
        /* Same broadcast, highest CPU id first.  (The loop variable is a
           signed int so the i >= 0 termination test works.) */
        /* One-by-one. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (int i = pUVM->cCpus-1; i >= 0; i--)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (   pReq->idDstCpu != VMCPUID_ANY   /* for a specific VMCPU? */
             && pReq->idDstCpu != VMCPUID_ANY_QUEUE
             && (   !pUVCpu                     /* and it's not the current thread. */
                 || pUVCpu->idCpu != pReq->idDstCpu))
    {
        VMCPUID idTarget = pReq->idDstCpu; Assert(idTarget < pUVM->cCpus);
        PVMCPU pVCpu = pUVM->pVM->apCpusR3[idTarget];
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */

        /* Fetch the right UVMCPU */
        pUVCpu = &pUVM->aCpus[idTarget];

        /*
         * Insert it.  Lock-free push onto the target VCPU's normal or
         * priority queue; after the CAS succeeds, pReq may complete and be
         * freed at any moment, hence the fFlags copy above.
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVCpu->vm.s.pPriorityReqs : &pUVCpu->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
        VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else if (   (   pReq->idDstCpu == VMCPUID_ANY
                 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
             || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
    {
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */

        /* Note: pUVCpu may or may not be NULL in the VMCPUID_ANY_QUEUE case; we don't care. */

        /*
         * Insert it.  Same lock-free push as above, but onto the VM-wide
         * queues serviced by whichever EMT gets there first.
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVM->vm.s.pPriorityReqs : &pUVM->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
        VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else
    {
        Assert(pUVCpu);

        /*
         * The requester was an EMT, just execute it.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        rc = vmR3ReqProcessOne(pReq);
        LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
    }
    return rc;
}
949
950
951/**
952 * Wait for a request to be completed.
953 *
954 * @returns VBox status code.
955 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
956 *
957 * @param pReq The request to wait for.
958 * @param cMillies Number of milliseconds to wait.
959 * Use RT_INDEFINITE_WAIT to only wait till it's completed.
960 */
961VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, RTMSINTERVAL cMillies)
962{
963 LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));
964
965 /*
966 * Verify the supplied package.
967 */
968 AssertMsgReturn( pReq->enmState == VMREQSTATE_QUEUED
969 || pReq->enmState == VMREQSTATE_PROCESSING
970 || pReq->enmState == VMREQSTATE_COMPLETED,
971 ("Invalid state %d\n", pReq->enmState),
972 VERR_VM_REQUEST_STATE);
973 AssertMsgReturn( VALID_PTR(pReq->pUVM)
974 && pReq->EventSem != NIL_RTSEMEVENT,
975 ("Invalid request package! Anyone cooking their own packages???\n"),
976 VERR_VM_REQUEST_INVALID_PACKAGE);
977 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
978 && pReq->enmType < VMREQTYPE_MAX,
979 ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
980 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
981 VERR_VM_REQUEST_INVALID_TYPE);
982
983 /*
984 * Check for deadlock condition
985 */
986 PUVM pUVM = pReq->pUVM;
987 NOREF(pUVM);
988
989 /*
990 * Wait on the package.
991 */
992 int rc;
993 if (cMillies != RT_INDEFINITE_WAIT)
994 rc = RTSemEventWait(pReq->EventSem, cMillies);
995 else
996 {
997 do
998 {
999 rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
1000 Assert(rc != VERR_TIMEOUT);
1001 } while ( pReq->enmState != VMREQSTATE_COMPLETED
1002 && pReq->enmState != VMREQSTATE_INVALID);
1003 }
1004 if (RT_SUCCESS(rc))
1005 ASMAtomicXchgSize(&pReq->fEventSemClear, true);
1006 if (pReq->enmState == VMREQSTATE_COMPLETED)
1007 rc = VINF_SUCCESS;
1008 LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
1009 Assert(rc != VERR_INTERRUPTED);
1010 return rc;
1011}
1012
1013
1014/**
1015 * Sets the relevant FF.
1016 *
1017 * @param pUVM Pointer to the user mode VM structure.
1018 * @param idDstCpu VMCPUID_ANY or the ID of the current CPU.
1019 */
1020DECLINLINE(void) vmR3ReqSetFF(PUVM pUVM, VMCPUID idDstCpu)
1021{
1022 if (RT_LIKELY(pUVM->pVM))
1023 {
1024 if (idDstCpu == VMCPUID_ANY)
1025 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
1026 else
1027 VMCPU_FF_SET(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
1028 }
1029}
1030
1031
1032/**
1033 * VMR3ReqProcessU helper that handles cases where there are more than one
1034 * pending request.
1035 *
1036 * @returns The oldest request.
1037 * @param pUVM Pointer to the user mode VM structure
1038 * @param idDstCpu VMCPUID_ANY or virtual CPU ID.
1039 * @param pReqList The list of requests.
1040 * @param ppReqs Pointer to the list head.
1041 */
1042static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ pReqList, PVMREQ volatile *ppReqs)
1043{
1044 STAM_COUNTER_INC(&pUVM->vm.s.StatReqMoreThan1);
1045
1046 /*
1047 * Chop off the last one (pReq).
1048 */
1049 PVMREQ pPrev;
1050 PVMREQ pReqRet = pReqList;
1051 do
1052 {
1053 pPrev = pReqRet;
1054 pReqRet = pReqRet->pNext;
1055 } while (pReqRet->pNext);
1056 ASMAtomicWriteNullPtr(&pPrev->pNext);
1057
1058 /*
1059 * Push the others back onto the list (end of it).
1060 */
1061 Log2(("VMR3ReqProcess: Pushing back %p %p...\n", pReqList, pReqList->pNext));
1062 if (RT_UNLIKELY(!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL)))
1063 {
1064 STAM_COUNTER_INC(&pUVM->vm.s.StatReqPushBackRaces);
1065 do
1066 {
1067 ASMNopPause();
1068 PVMREQ pReqList2 = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
1069 if (pReqList2)
1070 {
1071 PVMREQ pLast = pReqList2;
1072 while (pLast->pNext)
1073 pLast = pLast->pNext;
1074 ASMAtomicWritePtr(&pLast->pNext, pReqList);
1075 pReqList = pReqList2;
1076 }
1077 } while (!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL));
1078 }
1079
1080 vmR3ReqSetFF(pUVM, idDstCpu);
1081 return pReqRet;
1082}
1083
1084
1085/**
1086 * Process pending request(s).
1087 *
1088 * This function is called from a forced action handler in the EMT
1089 * or from one of the EMT loops.
1090 *
1091 * @returns VBox status code.
1092 *
1093 * @param pUVM Pointer to the user mode VM structure.
1094 * @param idDstCpu Pass VMCPUID_ANY to process the common request queue
1095 * and the CPU ID for a CPU specific one. In the latter
1096 * case the calling thread must be the EMT of that CPU.
1097 * @param fPriorityOnly When set, only process the priority request queue.
1098 *
1099 * @note SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
1100 *
1101 * @remarks This was made reentrant for async PDM handling, the debugger and
1102 * others.
1103 * @internal
1104 */
1105VMMR3_INT_DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
1106{
1107 LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));
1108
1109 /*
1110 * Determine which queues to process.
1111 */
1112 PVMREQ volatile *ppNormalReqs;
1113 PVMREQ volatile *ppPriorityReqs;
1114 if (idDstCpu == VMCPUID_ANY)
1115 {
1116 ppPriorityReqs = &pUVM->vm.s.pPriorityReqs;
1117 ppNormalReqs = !fPriorityOnly ? &pUVM->vm.s.pNormalReqs : ppPriorityReqs;
1118 }
1119 else
1120 {
1121 Assert(idDstCpu < pUVM->cCpus);
1122 Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
1123 ppPriorityReqs = &pUVM->aCpus[idDstCpu].vm.s.pPriorityReqs;
1124 ppNormalReqs = !fPriorityOnly ? &pUVM->aCpus[idDstCpu].vm.s.pNormalReqs : ppPriorityReqs;
1125 }
1126
1127 /*
1128 * Process loop.
1129 *
1130 * We do not repeat the outer loop if we've got an informational status code
1131 * since that code needs processing by our caller (usually EM).
1132 */
1133 int rc = VINF_SUCCESS;
1134 for (;;)
1135 {
1136 /*
1137 * Get the pending requests.
1138 *
1139 * If there are more than one request, unlink the oldest and put the
1140 * rest back so that we're reentrant.
1141 */
1142 if (RT_LIKELY(pUVM->pVM))
1143 {
1144 if (idDstCpu == VMCPUID_ANY)
1145 VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
1146 else
1147 VMCPU_FF_CLEAR(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
1148 }
1149
1150 PVMREQ pReq = ASMAtomicXchgPtrT(ppPriorityReqs, NULL, PVMREQ);
1151 if (pReq)
1152 {
1153 if (RT_UNLIKELY(pReq->pNext))
1154 pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppPriorityReqs);
1155 else if (ASMAtomicReadPtrT(ppNormalReqs, PVMREQ))
1156 vmR3ReqSetFF(pUVM, idDstCpu);
1157 }
1158 else
1159 {
1160 pReq = ASMAtomicXchgPtrT(ppNormalReqs, NULL, PVMREQ);
1161 if (!pReq)
1162 break;
1163 if (RT_UNLIKELY(pReq->pNext))
1164 pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppNormalReqs);
1165 }
1166
1167 /*
1168 * Process the request
1169 */
1170 STAM_COUNTER_INC(&pUVM->vm.s.StatReqProcessed);
1171 int rc2 = vmR3ReqProcessOne(pReq);
1172 if ( rc2 >= VINF_EM_FIRST
1173 && rc2 <= VINF_EM_LAST)
1174 {
1175 rc = rc2;
1176 break;
1177 }
1178 }
1179
1180 LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
1181 return rc;
1182}
1183
1184
1185/**
1186 * Process one request.
1187 *
1188 * @returns VBox status code.
1189 *
1190 * @param pReq Request packet to process.
1191 */
1192static int vmR3ReqProcessOne(PVMREQ pReq)
1193{
1194 LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));
1195
1196 /*
1197 * Process the request.
1198 */
1199 Assert(pReq->enmState == VMREQSTATE_QUEUED);
1200 pReq->enmState = VMREQSTATE_PROCESSING;
1201 int rcRet = VINF_SUCCESS; /* the return code of this function. */
1202 int rcReq = VERR_NOT_IMPLEMENTED; /* the request status. */
1203 switch (pReq->enmType)
1204 {
1205 /*
1206 * A packed down call frame.
1207 */
1208 case VMREQTYPE_INTERNAL:
1209 {
1210 uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
1211 union
1212 {
1213 PFNRT pfn;
1214 DECLCALLBACKMEMBER(int, pfn00)(void);
1215 DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
1216 DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
1217 DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
1218 DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1219 DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1220 DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1221 DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1222 DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1223 DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1224 DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1225 DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1226 DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1227 DECLCALLBACKMEMBER(int, pfn13)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1228 DECLCALLBACKMEMBER(int, pfn14)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1229 DECLCALLBACKMEMBER(int, pfn15)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1230 } u;
1231 u.pfn = pReq->u.Internal.pfn;
1232#ifdef RT_ARCH_AMD64
1233 switch (pReq->u.Internal.cArgs)
1234 {
1235 case 0: rcRet = u.pfn00(); break;
1236 case 1: rcRet = u.pfn01(pauArgs[0]); break;
1237 case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
1238 case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
1239 case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
1240 case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
1241 case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
1242 case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
1243 case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
1244 case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
1245 case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
1246 case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
1247 case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
1248 case 13: rcRet = u.pfn13(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12]); break;
1249 case 14: rcRet = u.pfn14(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13]); break;
1250 case 15: rcRet = u.pfn15(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13], pauArgs[14]); break;
1251 default:
1252 AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
1253 rcRet = rcReq = VERR_VM_REQUEST_TOO_MANY_ARGS_IPE;
1254 break;
1255 }
1256#else /* x86: */
1257 size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
1258# ifdef __GNUC__
1259 __asm__ __volatile__("movl %%esp, %%edx\n\t"
1260 "subl %2, %%esp\n\t"
1261 "andl $0xfffffff0, %%esp\n\t"
1262 "shrl $2, %2\n\t"
1263 "movl %%esp, %%edi\n\t"
1264 "rep movsl\n\t"
1265 "movl %%edx, %%edi\n\t"
1266 "call *%%eax\n\t"
1267 "mov %%edi, %%esp\n\t"
1268 : "=a" (rcRet),
1269 "=S" (pauArgs),
1270 "=c" (cbArgs)
1271 : "0" (u.pfn),
1272 "1" (pauArgs),
1273 "2" (cbArgs)
1274 : "edi", "edx");
1275# else
1276 __asm
1277 {
1278 xor edx, edx /* just mess it up. */
1279 mov eax, u.pfn
1280 mov ecx, cbArgs
1281 shr ecx, 2
1282 mov esi, pauArgs
1283 mov ebx, esp
1284 sub esp, cbArgs
1285 and esp, 0xfffffff0
1286 mov edi, esp
1287 rep movsd
1288 call eax
1289 mov esp, ebx
1290 mov rcRet, eax
1291 }
1292# endif
1293#endif /* x86 */
1294 if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
1295 rcRet = VINF_SUCCESS;
1296 rcReq = rcRet;
1297 break;
1298 }
1299
1300 default:
1301 AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
1302 rcReq = VERR_NOT_IMPLEMENTED;
1303 break;
1304 }
1305
1306 /*
1307 * Complete the request.
1308 */
1309 pReq->iStatus = rcReq;
1310 pReq->enmState = VMREQSTATE_COMPLETED;
1311 if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
1312 {
1313 /* Free the packet, nobody is waiting. */
1314 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
1315 pReq, rcReq, rcRet));
1316 VMR3ReqFree(pReq);
1317 }
1318 else
1319 {
1320 /* Notify the waiter and him free up the packet. */
1321 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
1322 pReq, rcReq, rcRet));
1323 ASMAtomicXchgSize(&pReq->fEventSemClear, false);
1324 int rc2 = RTSemEventSignal(pReq->EventSem);
1325 if (RT_FAILURE(rc2))
1326 {
1327 AssertRC(rc2);
1328 rcRet = rc2;
1329 }
1330 }
1331
1332 return rcRet;
1333}
1334
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette