VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/reqqueue.cpp@88813

Last change on this file since 88813 was 88813, checked in by vboxsync, 4 years ago

IPRT/RTReq: Added a RTReqCancel function. Changed the behavior of the RTREQFLAGS_NO_WAIT to optionally return the request handle rather than obstinately returning NIL even when the phReq parameter is not NULL. This makes the usage clearer and allows canceling NO_WAIT requests. (Needed in DrvAudio.) bugref:9890

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.2 KB
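The change described above can be exercised as in the following minimal sketch. It assumes a hypothetical worker function drvDemoWorker and that RTReqCancel takes the request handle; with RTREQFLAGS_NO_WAIT the call returns immediately, and a non-NULL phReq now receives the request handle instead of NIL, so the request can be cancelled later:

    PRTREQ hReq = NIL_RTREQ;
    int rc = RTReqQueueCallEx(hQueue, &hReq, 0 /*cMillies*/,
                              RTREQFLAGS_IPRT_STATUS | RTREQFLAGS_NO_WAIT,
                              (PFNRT)drvDemoWorker /* hypothetical callee */, 1, pvUser);
    if (RT_SUCCESS(rc) && hReq != NIL_RTREQ)
    {
        /* ... later, if the work is no longer wanted: */
        RTReqCancel(hReq);   /* assumed to take the request handle, per the change log above */
        RTReqRelease(hReq);  /* drop the reference we got via phReq */
    }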
/* $Id: reqqueue.cpp 88813 2021-05-01 18:15:13Z vboxsync $ */
/** @file
 * IPRT - Request Queue.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                  *
*********************************************************************************************************************************/
#include <iprt/req.h>
#include "internal/iprt.h"

#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include <iprt/string.h>
#include <iprt/time.h>
#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/log.h>
#include <iprt/mem.h>

#include "internal/req.h"
#include "internal/magics.h"


RTDECL(int) RTReqQueueCreate(RTREQQUEUE *phQueue)
{
    PRTREQQUEUEINT pQueue = (PRTREQQUEUEINT)RTMemAllocZ(sizeof(RTREQQUEUEINT));
    if (!pQueue)
        return VERR_NO_MEMORY;
    int rc = RTSemEventCreate(&pQueue->EventSem);
    if (RT_SUCCESS(rc))
    {
        pQueue->u32Magic = RTREQQUEUE_MAGIC;

        *phQueue = pQueue;
        return VINF_SUCCESS;
    }

    RTMemFree(pQueue);
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueCreate);


RTDECL(int) RTReqQueueDestroy(RTREQQUEUE hQueue)
{
    /*
     * Check input.
     */
    if (hQueue == NIL_RTREQQUEUE)
        return VINF_SUCCESS;
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(ASMAtomicCmpXchgU32(&pQueue->u32Magic, RTREQQUEUE_MAGIC_DEAD, RTREQQUEUE_MAGIC), VERR_INVALID_HANDLE);

    RTSemEventDestroy(pQueue->EventSem);
    pQueue->EventSem = NIL_RTSEMEVENT;

    for (unsigned i = 0; i < RT_ELEMENTS(pQueue->apReqFree); i++)
    {
        PRTREQ pReq = (PRTREQ)ASMAtomicXchgPtr((void **)&pQueue->apReqFree[i], NULL);
        while (pReq)
        {
            PRTREQ pNext = pReq->pNext;
            rtReqFreeIt(pReq);
            pReq = pNext;
        }
    }

    RTMemFree(pQueue);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTReqQueueDestroy);


RTDECL(int) RTReqQueueProcess(RTREQQUEUE hQueue, RTMSINTERVAL cMillies)
{
    LogFlow(("RTReqQueueProcess %x\n", hQueue));

    /*
     * Check input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Process loop.  Stop (break) after the first non-VINF_SUCCESS status code.
     */
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Get pending requests.
         */
        PRTREQ pReqs = ASMAtomicXchgPtrT(&pQueue->pAlreadyPendingReqs, NULL, PRTREQ);
        if (RT_LIKELY(!pReqs))
        {
            pReqs = ASMAtomicXchgPtrT(&pQueue->pReqs, NULL, PRTREQ);
            if (!pReqs)
            {
                /* We do not adjust cMillies (documented behavior). */
                ASMAtomicWriteBool(&pQueue->fBusy, false); /* this ain't 100% perfect, but it's good enough for now... */
                rc = RTSemEventWait(pQueue->EventSem, cMillies);
                if (rc != VINF_SUCCESS)
                    break;
                continue;
            }

            ASMAtomicWriteBool(&pQueue->fBusy, true);

            /*
             * Reverse the list to process it in FIFO order.
             */
            PRTREQ pReq = pReqs;
            if (pReq->pNext)
                Log2(("RTReqQueueProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
            pReqs = NULL;
            while (pReq)
            {
                Assert(pReq->enmState == RTREQSTATE_QUEUED);
                Assert(pReq->uOwner.hQueue == pQueue);
                PRTREQ pCur = pReq;
                pReq = pReq->pNext;
                pCur->pNext = pReqs;
                pReqs = pCur;
            }

        }
        else
            ASMAtomicWriteBool(&pQueue->fBusy, true);

        /*
         * Process the requests.
         */
        while (pReqs)
        {
            /* Unchain the first request and advance the list. */
            PRTREQ pReq = pReqs;
            pReqs = pReqs->pNext;
            pReq->pNext = NULL;

            /* Process the request. */
            rc = rtReqProcessOne(pReq);
            if (rc != VINF_SUCCESS)
            {
                /* Propagate the return code to caller.  If more requests pending, queue them for later. */
                if (pReqs)
                {
                    pReqs = ASMAtomicXchgPtrT(&pQueue->pAlreadyPendingReqs, pReqs, PRTREQ);
                    Assert(!pReqs);
                }
                break;
            }
        }
        if (rc != VINF_SUCCESS)
            break;
    }

    LogFlow(("RTReqQueueProcess: returns %Rrc\n", rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueProcess);
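
/*
 * Usage sketch: a single thread normally owns the queue and drives it with
 * RTReqQueueProcess while other threads submit work through the
 * RTReqQueueCall* API.  The thread procedure below is purely illustrative;
 * drvDemoQueueThread and its shutdown convention are assumptions, not part
 * of this file.
 *
 * @code
 *  static DECLCALLBACK(int) drvDemoQueueThread(RTTHREAD hThreadSelf, void *pvUser)
 *  {
 *      RTREQQUEUE hQueue = (RTREQQUEUE)pvUser;
 *      RT_NOREF(hThreadSelf);
 *      for (;;)
 *      {
 *          int rc = RTReqQueueProcess(hQueue, RT_INDEFINITE_WAIT);
 *          if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
 *              break;  // e.g. a queued shutdown request returned a status we stop on
 *      }
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */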


RTDECL(int) RTReqQueueCall(RTREQQUEUE hQueue, PRTREQ *ppReq, RTMSINTERVAL cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
{
    va_list va;
    va_start(va, cArgs);
    int rc = RTReqQueueCallV(hQueue, ppReq, cMillies, RTREQFLAGS_IPRT_STATUS, pfnFunction, cArgs, va);
    va_end(va);
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueCall);


RTDECL(int) RTReqQueueCallVoid(RTREQQUEUE hQueue, PRTREQ *ppReq, RTMSINTERVAL cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
{
    va_list va;
    va_start(va, cArgs);
    int rc = RTReqQueueCallV(hQueue, ppReq, cMillies, RTREQFLAGS_VOID, pfnFunction, cArgs, va);
    va_end(va);
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueCallVoid);


RTDECL(int) RTReqQueueCallEx(RTREQQUEUE hQueue, PRTREQ *ppReq, RTMSINTERVAL cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
{
    va_list va;
    va_start(va, cArgs);
    int rc = RTReqQueueCallV(hQueue, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
    va_end(va);
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueCallEx);


RTDECL(int) RTReqQueueCallV(RTREQQUEUE hQueue, PRTREQ *ppReq, RTMSINTERVAL cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, va_list Args)
{
    LogFlow(("RTReqQueueCallV: cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", cMillies, fFlags, pfnFunction, cArgs));

    /*
     * Check input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);
    AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~(RTREQFLAGS_RETURN_MASK | RTREQFLAGS_NO_WAIT)), VERR_INVALID_PARAMETER);

    if (!(fFlags & RTREQFLAGS_NO_WAIT) || ppReq)
    {
        AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
        *ppReq = NIL_RTREQ;
    }

    PRTREQ pReq = NULL;
    AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs), ("cArgs=%u\n", cArgs), VERR_TOO_MUCH_DATA);

    /*
     * Allocate request
     */
    int rc = RTReqQueueAlloc(pQueue, RTREQTYPE_INTERNAL, &pReq);
    if (rc != VINF_SUCCESS)
        return rc;

    /*
     * Initialize the request data.
     */
    pReq->fFlags           = fFlags;
    pReq->u.Internal.pfn   = pfnFunction;
    pReq->u.Internal.cArgs = cArgs;
    for (unsigned iArg = 0; iArg < cArgs; iArg++)
        pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);

    /*
     * Queue the request and return.
     */
    rc = RTReqSubmit(pReq, cMillies);
    if (   rc != VINF_SUCCESS
        && rc != VERR_TIMEOUT)
    {
        RTReqRelease(pReq);
        pReq = NULL;
    }
    if (ppReq)
    {
        *ppReq = pReq;
        LogFlow(("RTReqQueueCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
    }
    else
    {
        RTReqRelease(pReq);
        LogFlow(("RTReqQueueCallV: returns %Rrc\n", rc));
    }
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueCallV);
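
/*
 * Usage sketch: each argument is captured as a uintptr_t, so only
 * pointer-sized values (pointers, handles, small integers) can be passed.
 * The callee below, demoSetU32, is an assumption for illustration only.
 *
 * @code
 *  static DECLCALLBACK(int) demoSetU32(uint32_t *pu32Dst, uintptr_t uValue)
 *  {
 *      *pu32Dst = (uint32_t)uValue;
 *      return VINF_SUCCESS;
 *  }
 *
 *  // Execute demoSetU32 on the queue owner thread and wait up to 30 seconds.
 *  PRTREQ hReq = NIL_RTREQ;
 *  int rc = RTReqQueueCall(hQueue, &hReq, 30000 /-*ms*-/, (PFNRT)demoSetU32,
 *                          2, &u32Value, (uintptr_t)42);
 *  if (hReq != NIL_RTREQ)
 *      RTReqRelease(hReq);
 * @endcode
 */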


RTDECL(bool) RTReqQueueIsBusy(RTREQQUEUE hQueue)
{
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, false);

    if (ASMAtomicReadBool(&pQueue->fBusy))
        return true;
    if (ASMAtomicReadPtrT(&pQueue->pReqs, PRTREQ) != NULL)
        return true;
    if (ASMAtomicReadBool(&pQueue->fBusy))
        return true;
    return false;
}
RT_EXPORT_SYMBOL(RTReqQueueIsBusy);


/**
 * Joins the list pList with whatever is linked up at *ppHead.
 */
static void vmr3ReqJoinFreeSub(volatile PRTREQ *ppHead, PRTREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Swap pList in as the new head of the free sub-list; if the slot was empty we are done. */
        PRTREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PRTREQ);
        if (!pHead)
            return;

        /* The slot was occupied: append pList to the tail of the old list and
           try to put the combined list back (the slot should still hold pList). */
        PRTREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        pTail->pNext = pList;
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
            return;

        /* A racing thread changed the slot; detach pList again and, if the slot
           is now empty, park the old list there.  Otherwise retry with the old list. */
        pTail->pNext = NULL;
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
            return;
        pList = pHead;
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}


/**
 * Joins the list pList with the queue's free request lists, splitting it
 * first if it is too long.
 */
static void vmr3ReqJoinFree(PRTREQQUEUEINT pQueue, PRTREQ pList)
{
    /*
     * Split the list if it's too long.
     */
    unsigned cReqs = 1;
    PRTREQ pTail = pList;
    while (pTail->pNext)
    {
        if (cReqs++ > 25)
        {
            const uint32_t i = pQueue->iReqFree;
            vmr3ReqJoinFreeSub(&pQueue->apReqFree[(i + 2) % RT_ELEMENTS(pQueue->apReqFree)], pTail->pNext);

            pTail->pNext = NULL;
            vmr3ReqJoinFreeSub(&pQueue->apReqFree[(i + 2 + (i == pQueue->iReqFree)) % RT_ELEMENTS(pQueue->apReqFree)], pList);
            return;
        }
        pTail = pTail->pNext;
    }
    vmr3ReqJoinFreeSub(&pQueue->apReqFree[(pQueue->iReqFree + 2) % RT_ELEMENTS(pQueue->apReqFree)], pList);
}


RTDECL(int) RTReqQueueAlloc(RTREQQUEUE hQueue, RTREQTYPE enmType, PRTREQ *phReq)
{
    /*
     * Validate input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsgReturn(enmType > RTREQTYPE_INVALID && enmType < RTREQTYPE_MAX, ("%d\n", enmType), VERR_RT_REQUEST_INVALID_TYPE);

    /*
     * Try get a recycled packet.
     *
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pQueue->apReqFree) * 2;
    while (--cTries >= 0)
    {
        PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)];
        PRTREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PRTREQ);
        if (pReq)
        {
            PRTREQ pNext = pReq->pNext;
            if (   pNext
                && !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
                vmr3ReqJoinFree(pQueue, pReq->pNext);
            ASMAtomicDecU32(&pQueue->cReqFree);

            Assert(pReq->uOwner.hQueue == pQueue);
            Assert(!pReq->fPoolOrQueue);

            int rc = rtReqReInit(pReq, enmType);
            if (RT_SUCCESS(rc))
            {
                *phReq = pReq;
                LogFlow(("RTReqQueueAlloc: returns VINF_SUCCESS *phReq=%p recycled\n", pReq));
                return VINF_SUCCESS;
            }
        }
    }

    /*
     * Ok, allocate a new one.
     */
    int rc = rtReqAlloc(enmType, false /*fPoolOrQueue*/, pQueue, phReq);
    LogFlow(("RTReqQueueAlloc: returns %Rrc *phReq=%p\n", rc, *phReq));
    return rc;
}
RT_EXPORT_SYMBOL(RTReqQueueAlloc);
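
/*
 * Usage sketch: RTReqQueueAlloc is the building block underneath the
 * RTReqQueueCall* convenience API above; the sequence below mirrors what
 * RTReqQueueCallV does (demoCallback and pvUser are assumptions for
 * illustration).
 *
 * @code
 *  PRTREQ pReq;
 *  int rc = RTReqQueueAlloc(hQueue, RTREQTYPE_INTERNAL, &pReq);
 *  if (RT_SUCCESS(rc))
 *  {
 *      pReq->fFlags              = RTREQFLAGS_VOID;
 *      pReq->u.Internal.pfn      = (PFNRT)demoCallback;
 *      pReq->u.Internal.cArgs    = 1;
 *      pReq->u.Internal.aArgs[0] = (uintptr_t)pvUser;
 *      rc = RTReqSubmit(pReq, RT_INDEFINITE_WAIT);  // wait for completion
 *      RTReqRelease(pReq);
 *  }
 * @endcode
 */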


/**
 * Recycles a request.
 *
 * @returns true if recycled, false if it should be freed.
 * @param   pQueue              The queue.
 * @param   pReq                The request.
 */
DECLHIDDEN(bool) rtReqQueueRecycle(PRTREQQUEUEINT pQueue, PRTREQINT pReq)
{
    if (   !pQueue
        || pQueue->cReqFree >= 128)
        return false;

    ASMAtomicIncU32(&pQueue->cReqFree);
    PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)];
    PRTREQ pNext;
    do
    {
        /* Lock-free push onto the selected free list. */
        pNext = *ppHead;
        ASMAtomicWritePtr(&pReq->pNext, pNext);
    } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));

    return true;
}


/**
 * Submits a request to the queue.
 *
 * @param   pQueue              The queue.
 * @param   pReq                The request.
 */
DECLHIDDEN(void) rtReqQueueSubmit(PRTREQQUEUEINT pQueue, PRTREQINT pReq)
{
    /*
     * Push the request onto the head of the queue (LIFO); RTReqQueueProcess
     * reverses the list so requests are executed in FIFO order.
     */
    PRTREQ pNext;
    do
    {
        pNext = pQueue->pReqs;
        pReq->pNext = pNext;
        ASMAtomicWriteBool(&pQueue->fBusy, true);
    } while (!ASMAtomicCmpXchgPtr(&pQueue->pReqs, pReq, pNext));

    /*
     * Notify queue thread.
     */
    RTSemEventSignal(pQueue->EventSem);
}
