VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMReq.cpp@ 105266

Last change on this file since 105266 was 98103, checked in by vboxsync, 22 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 50.3 KB
Line 
1/* $Id: VMReq.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VM
33#include <VBox/vmm/mm.h>
34#include <VBox/vmm/vmm.h>
35#include "VMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/uvm.h>
38
39#include <VBox/err.h>
40#include <VBox/param.h>
41#include <VBox/log.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <iprt/time.h>
46#include <iprt/semaphore.h>
47#include <iprt/thread.h>
48
49
50/*********************************************************************************************************************************
51* Internal Functions *
52*********************************************************************************************************************************/
53static int vmR3ReqProcessOne(PVMREQ pReq);
54
55
56/**
57 * Convenience wrapper for VMR3ReqCallU.
58 *
59 * This assumes (1) you're calling a function that returns an VBox status code,
60 * (2) that you want it's return code on success, and (3) that you wish to wait
61 * for ever for it to return.
62 *
63 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
64 * its status code is return. Otherwise, the status of pfnFunction is
65 * returned.
66 *
67 * @param pVM The cross context VM structure.
68 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
69 * one of the following special values:
70 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
71 * @param pfnFunction Pointer to the function to call.
72 * @param cArgs Number of arguments following in the ellipsis.
73 * @param ... Function arguments.
74 *
75 * @remarks See remarks on VMR3ReqCallVU.
76 * @internal
77 */
78VMMR3_INT_DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
79{
80 PVMREQ pReq;
81 va_list va;
82 va_start(va, cArgs);
83 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
84 pfnFunction, cArgs, va);
85 va_end(va);
86 if (RT_SUCCESS(rc))
87 rc = pReq->iStatus;
88 VMR3ReqFree(pReq);
89 return rc;
90}
91
92
93/**
94 * Convenience wrapper for VMR3ReqCallU.
95 *
96 * This assumes (1) you're calling a function that returns an VBox status code,
97 * (2) that you want it's return code on success, and (3) that you wish to wait
98 * for ever for it to return.
99 *
100 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
101 * its status code is return. Otherwise, the status of pfnFunction is
102 * returned.
103 *
104 * @param pUVM The user mode VM structure.
105 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
106 * one of the following special values:
107 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
108 * @param pfnFunction Pointer to the function to call.
109 * @param cArgs Number of arguments following in the ellipsis.
110 * @param ... Function arguments.
111 *
112 * @remarks See remarks on VMR3ReqCallVU.
113 * @internal
114 */
115VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
116{
117 PVMREQ pReq;
118 va_list va;
119 va_start(va, cArgs);
120 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
121 pfnFunction, cArgs, va);
122 va_end(va);
123 if (RT_SUCCESS(rc))
124 rc = pReq->iStatus;
125 VMR3ReqFree(pReq);
126 return rc;
127}
128
129
130/**
131 * Convenience wrapper for VMR3ReqCallU.
132 *
133 * This assumes (1) you're calling a function that returns an VBox status code
134 * and that you do not wish to wait for it to complete.
135 *
136 * @returns VBox status code returned by VMR3ReqCallVU.
137 *
138 * @param pVM The cross context VM structure.
139 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
140 * one of the following special values:
141 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
142 * @param pfnFunction Pointer to the function to call.
143 * @param cArgs Number of arguments following in the ellipsis.
144 * @param ... Function arguments.
145 *
146 * @remarks See remarks on VMR3ReqCallVU.
147 * @internal
148 */
149VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
150{
151 va_list va;
152 va_start(va, cArgs);
153 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
154 pfnFunction, cArgs, va);
155 va_end(va);
156 return rc;
157}
158
159
160/**
161 * Convenience wrapper for VMR3ReqCallU.
162 *
163 * This assumes (1) you're calling a function that returns an VBox status code
164 * and that you do not wish to wait for it to complete.
165 *
166 * @returns VBox status code returned by VMR3ReqCallVU.
167 *
168 * @param pUVM Pointer to the VM.
169 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
170 * one of the following special values:
171 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
172 * @param pfnFunction Pointer to the function to call.
173 * @param cArgs Number of arguments following in the ellipsis.
174 * @param ... Function arguments.
175 *
176 * @remarks See remarks on VMR3ReqCallVU.
177 */
178VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
179{
180 va_list va;
181 va_start(va, cArgs);
182 int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
183 pfnFunction, cArgs, va);
184 va_end(va);
185 return rc;
186}
187
188
189/**
190 * Convenience wrapper for VMR3ReqCallU.
191 *
192 * This assumes (1) you're calling a function that returns void, and (2) that
193 * you wish to wait for ever for it to return.
194 *
195 * @returns VBox status code of VMR3ReqCallVU.
196 *
197 * @param pVM The cross context VM structure.
198 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
199 * one of the following special values:
200 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
201 * @param pfnFunction Pointer to the function to call.
202 * @param cArgs Number of arguments following in the ellipsis.
203 * @param ... Function arguments.
204 *
205 * @remarks See remarks on VMR3ReqCallVU.
206 * @internal
207 */
208VMMR3_INT_DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
209{
210 PVMREQ pReq;
211 va_list va;
212 va_start(va, cArgs);
213 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
214 pfnFunction, cArgs, va);
215 va_end(va);
216 VMR3ReqFree(pReq);
217 return rc;
218}
219
220
221/**
222 * Convenience wrapper for VMR3ReqCallU.
223 *
224 * This assumes (1) you're calling a function that returns void, and (2) that
225 * you wish to wait for ever for it to return.
226 *
227 * @returns VBox status code of VMR3ReqCallVU.
228 *
229 * @param pUVM Pointer to the VM.
230 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
231 * one of the following special values:
232 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
233 * @param pfnFunction Pointer to the function to call.
234 * @param cArgs Number of arguments following in the ellipsis.
235 * @param ... Function arguments.
236 *
237 * @remarks See remarks on VMR3ReqCallVU.
238 */
239VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
240{
241 PVMREQ pReq;
242 va_list va;
243 va_start(va, cArgs);
244 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
245 pfnFunction, cArgs, va);
246 va_end(va);
247 VMR3ReqFree(pReq);
248 return rc;
249}
250
251
252/**
253 * Convenience wrapper for VMR3ReqCallU.
254 *
255 * This assumes (1) you're calling a function that returns void, and (2) that
256 * you do not wish to wait for it to complete.
257 *
258 * @returns VBox status code of VMR3ReqCallVU.
259 *
260 * @param pVM The cross context VM structure.
261 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
262 * one of the following special values:
263 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
264 * @param pfnFunction Pointer to the function to call.
265 * @param cArgs Number of arguments following in the ellipsis.
266 * @param ... Function arguments.
267 *
268 * @remarks See remarks on VMR3ReqCallVU.
269 * @internal
270 */
271VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
272{
273 PVMREQ pReq;
274 va_list va;
275 va_start(va, cArgs);
276 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
277 pfnFunction, cArgs, va);
278 va_end(va);
279 VMR3ReqFree(pReq);
280 return rc;
281}
282
283
284/**
285 * Convenience wrapper for VMR3ReqCallU.
286 *
287 * This assumes (1) you're calling a function that returns an VBox status code,
288 * (2) that you want it's return code on success, (3) that you wish to wait for
289 * ever for it to return, and (4) that it's priority request that can be safely
290 * be handled during async suspend and power off.
291 *
292 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
293 * its status code is return. Otherwise, the status of pfnFunction is
294 * returned.
295 *
296 * @param pVM The cross context VM structure.
297 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
298 * one of the following special values:
299 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
300 * @param pfnFunction Pointer to the function to call.
301 * @param cArgs Number of arguments following in the ellipsis.
302 * @param ... Function arguments.
303 *
304 * @remarks See remarks on VMR3ReqCallVU.
305 * @internal
306 */
307VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
308{
309 PVMREQ pReq;
310 va_list va;
311 va_start(va, cArgs);
312 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
313 pfnFunction, cArgs, va);
314 va_end(va);
315 if (RT_SUCCESS(rc))
316 rc = pReq->iStatus;
317 VMR3ReqFree(pReq);
318 return rc;
319}
320
321
322/**
323 * Convenience wrapper for VMR3ReqCallU.
324 *
325 * This assumes (1) you're calling a function that returns an VBox status code,
326 * (2) that you want it's return code on success, (3) that you wish to wait for
327 * ever for it to return, and (4) that it's priority request that can be safely
328 * be handled during async suspend and power off.
329 *
330 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
331 * its status code is return. Otherwise, the status of pfnFunction is
332 * returned.
333 *
334 * @param pUVM The user mode VM handle.
335 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
336 * one of the following special values:
337 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
338 * @param pfnFunction Pointer to the function to call.
339 * @param cArgs Number of arguments following in the ellipsis.
340 * @param ... Function arguments.
341 *
342 * @remarks See remarks on VMR3ReqCallVU.
343 */
344VMMR3DECL(int) VMR3ReqPriorityCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
345{
346 PVMREQ pReq;
347 va_list va;
348 va_start(va, cArgs);
349 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
350 pfnFunction, cArgs, va);
351 va_end(va);
352 if (RT_SUCCESS(rc))
353 rc = pReq->iStatus;
354 VMR3ReqFree(pReq);
355 return rc;
356}
357
358
359/**
360 * Convenience wrapper for VMR3ReqCallU.
361 *
362 * This assumes (1) you're calling a function that returns void, (2) that you
363 * wish to wait for ever for it to return, and (3) that it's priority request
364 * that can be safely be handled during async suspend and power off.
365 *
366 * @returns VBox status code of VMR3ReqCallVU.
367 *
368 * @param pUVM The user mode VM handle.
369 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
370 * one of the following special values:
371 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
372 * @param pfnFunction Pointer to the function to call.
373 * @param cArgs Number of arguments following in the ellipsis.
374 * @param ... Function arguments.
375 *
376 * @remarks See remarks on VMR3ReqCallVU.
377 */
378VMMR3DECL(int) VMR3ReqPriorityCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
379{
380 PVMREQ pReq;
381 va_list va;
382 va_start(va, cArgs);
383 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
384 pfnFunction, cArgs, va);
385 va_end(va);
386 VMR3ReqFree(pReq);
387 return rc;
388}
389
390
391/**
392 * Allocate and queue a call request to a void function.
393 *
394 * If it's desired to poll on the completion of the request set cMillies
395 * to 0 and use VMR3ReqWait() to check for completion. In the other case
396 * use RT_INDEFINITE_WAIT.
397 * The returned request packet must be freed using VMR3ReqFree().
398 *
399 * @returns VBox status code.
400 * Will not return VERR_INTERRUPTED.
401 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
402 *
403 * @param pUVM Pointer to the user mode VM structure.
404 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
405 * one of the following special values:
406 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
407 * @param ppReq Where to store the pointer to the request.
408 * This will be NULL or a valid request pointer not matter what happens, unless fFlags
409 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
410 * @param cMillies Number of milliseconds to wait for the request to
411 * be completed. Use RT_INDEFINITE_WAIT to only
412 * wait till it's completed.
413 * @param fFlags A combination of the VMREQFLAGS values.
414 * @param pfnFunction Pointer to the function to call.
415 * @param cArgs Number of arguments following in the ellipsis.
416 * @param ... Function arguments.
417 *
418 * @remarks See remarks on VMR3ReqCallVU.
419 */
420VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
421 PFNRT pfnFunction, unsigned cArgs, ...)
422{
423 va_list va;
424 va_start(va, cArgs);
425 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
426 va_end(va);
427 return rc;
428}
429
430
/**
 * Allocate and queue a call request.
 *
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 * The returned request packet must be freed using VMR3ReqFree().
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                              VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 * @param   ppReq           Where to store the pointer to the request.
 *                          This will be NULL or a valid request pointer no matter what happens, unless fFlags
 *                          contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 * @param   pfnFunction     Pointer to the function to call.
 * @param   fFlags          A combination of the VMREQFLAGS values.
 * @param   cArgs           Number of arguments following in the ellipsis.
 *                          Stuff which differs in size from uintptr_t is gonna make trouble, so don't try!
 * @param   Args            Argument vector.
 *
 * @remarks Caveats:
 *              - Do not pass anything which is larger than an uintptr_t.
 *              - 64-bit integers are larger than uintptr_t on 32-bit hosts.
 *                Pass integers > 32-bit by reference (pointers).
 *              - Don't use NULL since it should be the integer 0 in C++ and may
 *                therefore end up with garbage in the bits 63:32 on 64-bit
 *                hosts because 'int' is 32-bit.
 *                Use (void *)NULL or (uintptr_t)0 instead of NULL.
 */
VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
                             PFNRT pfnFunction, unsigned cArgs, va_list Args)
{
    LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)), VERR_INVALID_PARAMETER);
    /* ppReq is mandatory unless the caller explicitly opted out via NO_WAIT;
       clear it up front so the caller sees NULL on any failure path below. */
    if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
    {
        AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
        *ppReq = NULL;
    }
    PVMREQ pReq = NULL;
    /* Note: sizeof(pReq->u.Internal.aArgs) only inspects the type, so the
       NULL pReq here is fine - no dereference takes place. */
    AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
                    ("cArg=%d\n", cArgs),
                    VERR_TOO_MUCH_DATA);

    /*
     * Allocate request
     */
    int rc = VMR3ReqAlloc(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the request data.
     */
    pReq->fFlags         = fFlags;
    pReq->u.Internal.pfn = pfnFunction;
    pReq->u.Internal.cArgs = cArgs;
    /* Each vararg is copied as a uintptr_t - see the caveats in the doc comment. */
    for (unsigned iArg = 0; iArg < cArgs; iArg++)
        pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);

    /*
     * Queue the request and return.
     */
    rc = VMR3ReqQueue(pReq, cMillies);
    /* On timeout the request is still live (EMT may yet execute it), so the
       packet must NOT be freed; any other failure means it never queued. */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT)
    {
        VMR3ReqFree(pReq);
        pReq = NULL;
    }
    if (!(fFlags & VMREQFLAGS_NO_WAIT))
    {
        *ppReq = pReq;
        LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
    }
    else
        LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
525
526
/**
 * Joins the list pList with whatever is linked up at *ppHead.
 *
 * Lock-free: atomically swaps pList into the slot; if the slot was occupied,
 * the old chain is appended to pList's tail and the merge is retried until a
 * compare-exchange succeeds.
 *
 * @param   ppHead  The free-list slot to push onto.
 * @param   pList   Linked list of requests to add (must not be NULL).
 */
static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Claim the slot; if it was empty, pList is installed and we're done. */
        PVMREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PVMREQ);
        if (!pHead)
            return;
        /* Slot was occupied: link the old chain behind pList by appending
           pList to the old chain's tail... */
        PVMREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        ASMAtomicWritePtr(&pTail->pNext, pList);
        ASMCompilerBarrier();
        /* ...and try to put the combined chain back (expecting our pList to
           still be the head). */
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
            return;
        /* Someone raced us.  Unhook pList from the old chain again and try
           installing the old chain into a now-empty slot instead. */
        ASMAtomicWriteNullPtr(&pTail->pNext);
        ASMCompilerBarrier();
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
            return;
        /* Still racing: start over with the old chain as the list to insert. */
        pList = pHead;
        /* These fire if we're spinning suspiciously long - livelock canaries. */
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}
553
554
555/**
556 * Joins the list pList with whatever is linked up at *pHead.
557 */
558static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
559{
560 /*
561 * Split the list if it's too long.
562 */
563 unsigned cReqs = 1;
564 PVMREQ pTail = pList;
565 while (pTail->pNext)
566 {
567 if (cReqs++ > 25)
568 {
569 const uint32_t i = pVMInt->iReqFree;
570 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
571
572 pTail->pNext = NULL;
573 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
574 return;
575 }
576 pTail = pTail->pNext;
577 }
578 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
579}
580
581
/**
 * Allocates a request packet.
 *
 * The caller allocates a request packet, fills in the request data
 * union and queues the request.
 *
 * Tries to recycle a packet from the lock-free per-UVM free lists first and
 * only falls back on heap allocation when none is available.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   ppReq           Where to store the pointer to the allocated packet.
 * @param   enmType         Package type.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                              VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 */
VMMR3DECL(int) VMR3ReqAlloc(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively.\n",
                     enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
    AssertMsgReturn(   idDstCpu == VMCPUID_ANY
                    || idDstCpu == VMCPUID_ANY_QUEUE
                    || idDstCpu < pUVM->cCpus
                    || idDstCpu == VMCPUID_ALL
                    || idDstCpu == VMCPUID_ALL_REVERSE,
                    ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);

    /*
     * Try get a recycled packet.
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
    while (--cTries >= 0)
    {
        /* Rotate through the free-list slots so concurrent allocators spread out. */
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
#if 0 /* sad, but this won't work safely because the reading of pReq->pNext. */
        PVMREQ pNext = NULL;
        PVMREQ pReq = *ppHead;
        if (    pReq
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq)
            &&  (pReq = *ppHead)
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq))
            pReq = NULL;
        if (pReq)
        {
            Assert(pReq->pNext == pNext); NOREF(pReq);
#else
        /* Grab the entire chain from this slot; we keep the first packet and
           try to put the remainder straight back below. */
        PVMREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PVMREQ);
        if (pReq)
        {
            PVMREQ pNext = pReq->pNext;
            if (    pNext
                &&  !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
            {
                /* Somebody pushed onto the slot while we held the chain;
                   merge the remainder back in the slow way. */
                STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
                vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
            }
#endif
            ASMAtomicDecU32(&pUVM->vm.s.cReqFree);

            /*
             * Make sure the event sem is not signaled.
             */
            if (!pReq->fEventSemClear)
            {
                /* A zero-timeout wait consumes a stray signal, if any. */
                int rc = RTSemEventWait(pReq->EventSem, 0);
                if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
                {
                    /*
                     * This shall not happen, but if it does we'll just destroy
                     * the semaphore and create a new one.
                     */
                    AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
                    RTSemEventDestroy(pReq->EventSem);
                    rc = RTSemEventCreate(&pReq->EventSem);
                    AssertRC(rc);
                    if (RT_FAILURE(rc))
                        return rc;
#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
                    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
                        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif
                }
                pReq->fEventSemClear = true;
            }
            else
                Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);

            /*
             * Initialize the packet and return it.
             */
            Assert(pReq->enmType == VMREQTYPE_INVALID);
            Assert(pReq->enmState == VMREQSTATE_FREE);
            Assert(pReq->pUVM == pUVM);
            ASMAtomicWriteNullPtr(&pReq->pNext);
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->iStatus  = VERR_VM_REQUEST_STATUS_STILL_PENDING;
            pReq->fFlags   = VMREQFLAGS_VBOX_STATUS;
            pReq->enmType  = enmType;
            pReq->idDstCpu = idDstCpu;

            *ppReq = pReq;
            STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
            LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
            return VINF_SUCCESS;
        }
    }

    /*
     * Ok allocate one.
     */
    PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
    if (!pReq)
        return VERR_NO_MEMORY;

    /*
     * Create the semaphore.
     */
    int rc = RTSemEventCreate(&pReq->EventSem);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        MMR3HeapFree(pReq);
        return rc;
    }
#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif

    /*
     * Initialize the packet and return it.
     */
    pReq->pNext    = NULL;
    pReq->pUVM     = pUVM;
    pReq->enmState = VMREQSTATE_ALLOCATED;
    pReq->iStatus  = VERR_VM_REQUEST_STATUS_STILL_PENDING;
    pReq->fEventSemClear = true;
    pReq->fFlags   = VMREQFLAGS_VBOX_STATUS;
    pReq->enmType  = enmType;
    pReq->idDstCpu = idDstCpu;

    *ppReq = pReq;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
    LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
    return VINF_SUCCESS;
}
735
736
737/**
738 * Free a request packet.
739 *
740 * @returns VBox status code.
741 *
742 * @param pReq Package to free.
743 * @remark The request packet must be in allocated or completed state!
744 */
745VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
746{
747 /*
748 * Ignore NULL (all free functions should do this imho).
749 */
750 if (!pReq)
751 return VINF_SUCCESS;
752
753 /*
754 * Check packet state.
755 */
756 switch (pReq->enmState)
757 {
758 case VMREQSTATE_ALLOCATED:
759 case VMREQSTATE_COMPLETED:
760 break;
761 default:
762 AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
763 return VERR_VM_REQUEST_STATE;
764 }
765
766 /*
767 * Make it a free packet and put it into one of the free packet lists.
768 */
769 pReq->enmState = VMREQSTATE_FREE;
770 pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
771 pReq->enmType = VMREQTYPE_INVALID;
772
773 PUVM pUVM = pReq->pUVM;
774 STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
775
776 if (pUVM->vm.s.cReqFree < 128)
777 {
778 ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
779 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
780 PVMREQ pNext;
781 do
782 {
783 pNext = ASMAtomicUoReadPtrT(ppHead, PVMREQ);
784 ASMAtomicWritePtr(&pReq->pNext, pNext);
785 ASMCompilerBarrier();
786 } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));
787 }
788 else
789 {
790 STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
791 RTSemEventDestroy(pReq->EventSem);
792 MMR3HeapFree(pReq);
793 }
794 return VINF_SUCCESS;
795}
796
797
/**
 * Queue a request.
 *
 * The request must be allocated using VMR3ReqAlloc() and contain
 * all the required data.
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to queue.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, RTMSINTERVAL cMillies)
{
    LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
    AssertMsgReturn(   RT_VALID_PTR(pReq->pUVM)
                    && !pReq->pNext
                    && pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(   pReq->enmType > VMREQTYPE_INVALID
                    && pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)));

    /*
     * Are we the EMT or not?
     * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
     */
    int     rc     = VINF_SUCCESS;
    PUVM    pUVM   = ((VMREQ volatile *)pReq)->pUVM;                 /* volatile paranoia */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (pReq->idDstCpu == VMCPUID_ALL)
    {
        /* One-by-one: recursively queue the same packet to each CPU in turn. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (unsigned i = 0; i < pUVM->cCpus; i++)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
    {
        /* One-by-one, highest CPU ID first. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (int i = pUVM->cCpus-1; i >= 0; i--)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (   pReq->idDstCpu != VMCPUID_ANY   /* for a specific VMCPU? */
             && pReq->idDstCpu != VMCPUID_ANY_QUEUE
             && (   !pUVCpu                     /* and it's not the current thread. */
                 || pUVCpu->idCpu != pReq->idDstCpu))
    {
        VMCPUID  idTarget = pReq->idDstCpu;                 Assert(idTarget < pUVM->cCpus);
        PVMCPU   pVCpu = pUVM->pVM->apCpusR3[idTarget];
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */

        /* Fetch the right UVMCPU */
        pUVCpu = &pUVM->aCpus[idTarget];

        /*
         * Insert it.  Lock-free push onto the target CPU's request queue
         * (priority or normal, depending on the request flags).
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVCpu->vm.s.pPriorityReqs : &pUVCpu->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
        VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else if (   (   pReq->idDstCpu == VMCPUID_ANY
                 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
             || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
    {
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */

        /* Note: pUVCpu may or may not be NULL in the VMCPUID_ANY_QUEUE case; we don't care. */

        /*
         * Insert it.  Lock-free push onto the global (any-CPU) request queue.
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVM->vm.s.pPriorityReqs : &pUVM->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
        VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else
    {
        Assert(pUVCpu);

        /*
         * The requester was an EMT, just execute it.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        rc = vmR3ReqProcessOne(pReq);
        LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
    }
    return rc;
}
958
959
/**
 * Wait for a request to be completed.
 *
 * @returns VBox status code.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to wait for.
 * @param   cMillies        Number of milliseconds to wait.
 *                          Use RT_INDEFINITE_WAIT to only wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, RTMSINTERVAL cMillies)
{
    LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));

    /*
     * Verify the supplied package.
     *
     * COMPLETED is a valid input state here: the EMT may have finished the
     * request before the requester got around to waiting on it.
     */
    AssertMsgReturn(    pReq->enmState == VMREQSTATE_QUEUED
                    ||  pReq->enmState == VMREQSTATE_PROCESSING
                    ||  pReq->enmState == VMREQSTATE_COMPLETED,
                    ("Invalid state %d\n", pReq->enmState),
                    VERR_VM_REQUEST_STATE);
    AssertMsgReturn(    RT_VALID_PTR(pReq->pUVM)
                    &&  pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(    pReq->enmType > VMREQTYPE_INVALID
                    &&  pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);

    /*
     * Check for deadlock condition
     *
     * NOTE(review): currently a no-op placeholder - pUVM is fetched but only
     * NOREF'ed; no actual deadlock detection is performed here.
     */
    PUVM pUVM = pReq->pUVM;
    NOREF(pUVM);

    /*
     * Wait on the package.
     */
    int rc;
    if (cMillies != RT_INDEFINITE_WAIT)
        rc = RTSemEventWait(pReq->EventSem, cMillies);
    else
    {
        /* Indefinite wait: the semaphore may be signalled before the request
           has actually reached a terminal state (presumably a stale signal
           from a recycled request packet - TODO confirm), so re-wait until
           the state says completed or invalid. */
        do
        {
            rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
            Assert(rc != VERR_TIMEOUT);
        } while (     pReq->enmState != VMREQSTATE_COMPLETED
                 &&   pReq->enmState != VMREQSTATE_INVALID);
    }
    if (RT_SUCCESS(rc))
        /* Record that the wait consumed the semaphore signal; the completion
           side flips this to false before signalling (see vmR3ReqProcessOne). */
        ASMAtomicXchgSize(&pReq->fEventSemClear, true);
    if (pReq->enmState == VMREQSTATE_COMPLETED)
        /* A completed request trumps whatever the wait itself returned. */
        rc = VINF_SUCCESS;
    LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
1021
1022
1023/**
1024 * Sets the relevant FF.
1025 *
1026 * @param pUVM Pointer to the user mode VM structure.
1027 * @param idDstCpu VMCPUID_ANY or the ID of the current CPU.
1028 */
1029DECLINLINE(void) vmR3ReqSetFF(PUVM pUVM, VMCPUID idDstCpu)
1030{
1031 if (RT_LIKELY(pUVM->pVM))
1032 {
1033 if (idDstCpu == VMCPUID_ANY)
1034 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
1035 else
1036 VMCPU_FF_SET(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
1037 }
1038}
1039
1040
/**
 * VMR3ReqProcessU helper that handles cases where there are more than one
 * pending request.
 *
 * The request list is a lock-free LIFO: VMR3ReqQueue pushes new requests at
 * the head, so the OLDEST request sits at the tail.  This helper detaches
 * the tail for processing and pushes the remaining (newer) requests back,
 * coping with concurrent pushers via compare-and-exchange retries.
 *
 * @returns The oldest request.
 * @param   pUVM        Pointer to the user mode VM structure
 * @param   idDstCpu    VMCPUID_ANY or virtual CPU ID.
 * @param   pReqList    The list of requests.
 * @param   ppReqs      Pointer to the list head.
 */
static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ pReqList, PVMREQ volatile *ppReqs)
{
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqMoreThan1);

    /*
     * Chop off the last one (pReq).
     * (The tail is the oldest entry since insertion is at the head.)
     */
    PVMREQ pPrev;
    PVMREQ pReqRet = pReqList;
    do
    {
        pPrev = pReqRet;
        pReqRet = pReqRet->pNext;
    } while (pReqRet->pNext);
    ASMAtomicWriteNullPtr(&pPrev->pNext);

    /*
     * Push the others back onto the list (end of it).
     */
    Log2(("VMR3ReqProcess: Pushing back %p %p...\n", pReqList, pReqList->pNext));
    if (RT_UNLIKELY(!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL)))
    {
        /* Someone pushed new requests while we held the list detached.  Keep
           taking whatever accumulated at the head and splice our (older)
           remainder onto its tail, preserving newest-first order, until the
           head slot is empty long enough for the CmpXchg to stick. */
        STAM_COUNTER_INC(&pUVM->vm.s.StatReqPushBackRaces);
        do
        {
            ASMNopPause();
            PVMREQ pReqList2 = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
            if (pReqList2)
            {
                PVMREQ pLast = pReqList2;
                while (pLast->pNext)
                    pLast = pLast->pNext;
                ASMAtomicWritePtr(&pLast->pNext, pReqList);
                pReqList = pReqList2;
            }
        } while (!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL));
    }

    /* Requests remain queued, so make sure the request FF stays raised. */
    vmR3ReqSetFF(pUVM, idDstCpu);
    return pReqRet;
}
1092
1093
/**
 * Process pending request(s).
 *
 * This function is called from a forced action handler in the EMT
 * or from one of the EMT loops.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        Pass VMCPUID_ANY to process the common request queue
 *                          and the CPU ID for a CPU specific one. In the latter
 *                          case the calling thread must be the EMT of that CPU.
 * @param   fPriorityOnly   When set, only process the priority request queue.
 *
 * @note    SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
 *
 * @remarks This was made reentrant for async PDM handling, the debugger and
 *          others.
 * @internal
 */
VMMR3_INT_DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
{
    LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));

    /*
     * Determine which queues to process.
     *
     * When fPriorityOnly is set, the normal-queue pointer is aliased to the
     * priority queue so the loop below simply never sees normal requests.
     */
    PVMREQ volatile *ppNormalReqs;
    PVMREQ volatile *ppPriorityReqs;
    if (idDstCpu == VMCPUID_ANY)
    {
        ppPriorityReqs = &pUVM->vm.s.pPriorityReqs;
        ppNormalReqs   = !fPriorityOnly ? &pUVM->vm.s.pNormalReqs   : ppPriorityReqs;
    }
    else
    {
        Assert(idDstCpu < pUVM->cCpus);
        Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
        ppPriorityReqs = &pUVM->aCpus[idDstCpu].vm.s.pPriorityReqs;
        ppNormalReqs   = !fPriorityOnly ? &pUVM->aCpus[idDstCpu].vm.s.pNormalReqs : ppPriorityReqs;
    }

    /*
     * Process loop.
     *
     * We do not repeat the outer loop if we've got an informational status code
     * since that code needs processing by our caller (usually EM).
     */
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Get the pending requests.
         *
         * If there are more than one request, unlink the oldest and put the
         * rest back so that we're reentrant.
         */
        /* Clear the FF *before* grabbing the queues: an enqueuer sets the FF
           after inserting (see VMR3ReqQueue), so anything pushed after this
           point re-raises the flag and is picked up on a later call. */
        if (RT_LIKELY(pUVM->pVM))
        {
            if (idDstCpu == VMCPUID_ANY)
                VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
            else
                VMCPU_FF_CLEAR(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
        }

        /* Priority requests are always drained before normal ones. */
        PVMREQ pReq = ASMAtomicXchgPtrT(ppPriorityReqs, NULL, PVMREQ);
        if (pReq)
        {
            if (RT_UNLIKELY(pReq->pNext))
                pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppPriorityReqs);
            else if (ASMAtomicReadPtrT(ppNormalReqs, PVMREQ))
                /* Normal requests are still pending; keep the FF raised. */
                vmR3ReqSetFF(pUVM, idDstCpu);
        }
        else
        {
            pReq = ASMAtomicXchgPtrT(ppNormalReqs, NULL, PVMREQ);
            if (!pReq)
                break;
            if (RT_UNLIKELY(pReq->pNext))
                pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppNormalReqs);
        }

        /*
         * Process the request
         */
        STAM_COUNTER_INC(&pUVM->vm.s.StatReqProcessed);
        int rc2 = vmR3ReqProcessOne(pReq);
        if (    rc2 >= VINF_EM_FIRST
            &&  rc2 <= VINF_EM_LAST)
        {
            /* Informational EM status codes must be propagated to the caller
               before any further requests are processed. */
            rc = rc2;
            break;
        }
    }

    LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
    return rc;
}
1192
1193
/**
 * Process one request.
 *
 * Dispatches the packed-down call frame carried by the request, records the
 * status, and either frees the packet (no-wait requests) or signals the
 * waiting thread.
 *
 * @returns VBox status code.
 *
 * @param   pReq        Request packet to process.
 */
static int vmR3ReqProcessOne(PVMREQ pReq)
{
    LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));

    /*
     * Process the request.
     */
    Assert(pReq->enmState == VMREQSTATE_QUEUED);
    pReq->enmState = VMREQSTATE_PROCESSING;
    int rcRet = VINF_SUCCESS;           /* the return code of this function. */
    int rcReq = VERR_NOT_IMPLEMENTED;   /* the request status. */
    switch (pReq->enmType)
    {
        /*
         * A packed down call frame.
         */
        case VMREQTYPE_INTERNAL:
        {
            uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
            /* View the generic PFNRT as a callback taking exactly cArgs
               uintptr_t arguments, so it can be invoked without varargs. */
            union
            {
                PFNRT pfn;
                DECLCALLBACKMEMBER(int, pfn00,(void));
                DECLCALLBACKMEMBER(int, pfn01,(uintptr_t));
                DECLCALLBACKMEMBER(int, pfn02,(uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn03,(uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn04,(uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn05,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn06,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn07,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn08,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn09,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn10,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn11,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn12,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn13,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn14,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
                DECLCALLBACKMEMBER(int, pfn15,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
            } u;
            u.pfn = pReq->u.Internal.pfn;
#ifndef RT_ARCH_X86
            /* Non-x86: expand the argument array into an explicit call for
               each supported argument count. */
            switch (pReq->u.Internal.cArgs)
            {
                case 0:  rcRet = u.pfn00(); break;
                case 1:  rcRet = u.pfn01(pauArgs[0]); break;
                case 2:  rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
                case 3:  rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
                case 4:  rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
                case 5:  rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
                case 6:  rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
                case 7:  rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
                case 8:  rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
                case 9:  rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
                case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
                case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
                case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
                case 13: rcRet = u.pfn13(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12]); break;
                case 14: rcRet = u.pfn14(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13]); break;
                case 15: rcRet = u.pfn15(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13], pauArgs[14]); break;
                default:
                    AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
                    rcRet = rcReq = VERR_VM_REQUEST_TOO_MANY_ARGS_IPE;
                    break;
            }
#else /* x86: */
            /* x86/cdecl: copy the argument array onto a 16-byte aligned chunk
               of the stack and call the function directly; cdecl lets the
               caller restore ESP afterwards regardless of cArgs. */
            size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
# ifdef __GNUC__
            __asm__ __volatile__("movl  %%esp, %%edx\n\t"       /* save ESP in EDX */
                                 "subl  %2, %%esp\n\t"          /* make room for the arguments */
                                 "andl  $0xfffffff0, %%esp\n\t" /* 16-byte align the stack */
                                 "shrl  $2, %2\n\t"             /* byte count -> dword count for rep movsl */
                                 "movl  %%esp, %%edi\n\t"
                                 "rep movsl\n\t"                /* copy the arguments onto the stack */
                                 "movl  %%edx, %%edi\n\t"       /* stash saved ESP in EDI across the call */
                                 "call  *%%eax\n\t"
                                 "mov   %%edi, %%esp\n\t"       /* restore ESP */
                                 : "=a" (rcRet),
                                   "=S" (pauArgs),
                                   "=c" (cbArgs)
                                 : "0" (u.pfn),
                                   "1" (pauArgs),
                                   "2" (cbArgs)
                                 : "edi", "edx");
# else
            __asm
            {
                xor     edx, edx        /* just mess it up. */
                mov     eax, u.pfn
                mov     ecx, cbArgs
                shr     ecx, 2          /* byte count -> dword count for rep movsd */
                mov     esi, pauArgs
                mov     ebx, esp        /* save ESP */
                sub     esp, cbArgs     /* make room for the arguments */
                and     esp, 0xfffffff0 /* 16-byte align the stack */
                mov     edi, esp
                rep movsd               /* copy the arguments onto the stack */
                call    eax
                mov     esp, ebx        /* restore ESP */
                mov     rcRet, eax
            }
# endif
#endif /* x86 */
            /* VOID requests have no meaningful return value; normalize it. */
            if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
                rcRet = VINF_SUCCESS;
            rcReq = rcRet;
            break;
        }

        default:
            AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
            rcReq = VERR_NOT_IMPLEMENTED;
            break;
    }

    /*
     * Complete the request.
     */
    pReq->iStatus  = rcReq;
    pReq->enmState = VMREQSTATE_COMPLETED;
    if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
    {
        /* Free the packet, nobody is waiting. */
        LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
                 pReq, rcReq, rcRet));
        VMR3ReqFree(pReq);
    }
    else
    {
        /* Notify the waiter and let him free up the packet. */
        LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
                 pReq, rcReq, rcRet));
        /* Clear fEventSemClear before signalling; VMR3ReqWait sets it back
           to true once it has consumed the signal. */
        ASMAtomicXchgSize(&pReq->fEventSemClear, false);
        int rc2 = RTSemEventSignal(pReq->EventSem);
        if (RT_FAILURE(rc2))
        {
            AssertRC(rc2);
            rcRet = rc2;
        }
    }

    return rcRet;
}
1343
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette