VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp@ 78126

Last change on this file since 78126 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 66.4 KB
Line 
1/* $Id: PDMAsyncCompletionFileNormal.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Async File I/O manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
23#include <iprt/types.h>
24#include <iprt/asm.h>
25#include <iprt/file.h>
26#include <iprt/mem.h>
27#include <iprt/string.h>
28#include <iprt/assert.h>
29#include <VBox/log.h>
30
31#include "PDMAsyncCompletionFileInternal.h"
32
33/** The update period for the I/O load statistics in ms. */
34#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
35/** Maximum number of requests a manager will handle. */
36#define PDMACEPFILEMGR_REQS_STEP 64
37
38
39/*********************************************************************************************************************************
40* Internal functions *
41*********************************************************************************************************************************/
/** Processes a list of tasks for an endpoint, preparing and submitting host AIO requests (defined later in this file). */
static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
                                                PPDMACEPFILEMGR pAioMgr,
                                                PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);

/** Releases a range lock and returns the head of the list of tasks that were waiting on it (defined later in this file). */
static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
                                                         PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                                         PPDMACFILERANGELOCK pRangeLock);

/** Completes a request with the given status code and transfer size (defined later in this file). */
static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
                                               int rc, size_t cbTransfered);
53
54int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
55{
56 pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
57
58 int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
59 if (rc == VERR_OUT_OF_RANGE)
60 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);
61
62 if (RT_SUCCESS(rc))
63 {
64 /* Initialize request handle array. */
65 pAioMgr->iFreeEntry = 0;
66 pAioMgr->cReqEntries = pAioMgr->cRequestsActiveMax;
67 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
68
69 if (pAioMgr->pahReqsFree)
70 {
71 /* Create the range lock memcache. */
72 rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
73 0, UINT32_MAX, NULL, NULL, NULL, 0);
74 if (RT_SUCCESS(rc))
75 return VINF_SUCCESS;
76
77 RTMemFree(pAioMgr->pahReqsFree);
78 }
79 else
80 {
81 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
82 rc = VERR_NO_MEMORY;
83 }
84 }
85
86 return rc;
87}
88
89void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
90{
91 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
92
93 while (pAioMgr->iFreeEntry > 0)
94 {
95 pAioMgr->iFreeEntry--;
96 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] != NIL_RTFILEAIOREQ);
97 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry]);
98 }
99
100 RTMemFree(pAioMgr->pahReqsFree);
101 RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
102}
103
#if 0 /* currently unused */
/**
 * Sorts the endpoint list with insertion sort.
 *
 * The list is ordered by descending requests-per-second load: after sorting,
 * the endpoint with the highest cReqsPerSec is at pAioMgr->pEndpointsHead.
 * The first element is taken as already sorted; each following element is
 * unlinked and re-inserted at its proper position.
 */
static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
{
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;

    pEpPrev = pAioMgr->pEndpointsHead;
    pEpCurr = pEpPrev->AioMgr.pEndpointNext;

    while (pEpCurr)
    {
        /* Remember the next element to sort because the list might change. */
        pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;

        /* Unlink the current element from the list. */
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;

        if (pPrev)
            pPrev->AioMgr.pEndpointNext = pNext;
        else
            pAioMgr->pEndpointsHead = pNext;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pPrev;

        /* Go back until we reached the place to insert the current endpoint into.
         * Stops at the first element with a load >= the current one (or at the head). */
        while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
            pEpPrev = pEpPrev->AioMgr.pEndpointPrev;

        /* Link the endpoint into the list between pEpPrev and its successor. */
        if (pEpPrev)
            pNext = pEpPrev->AioMgr.pEndpointNext;
        else
            pNext = pAioMgr->pEndpointsHead;

        pEpCurr->AioMgr.pEndpointNext = pNext;
        pEpCurr->AioMgr.pEndpointPrev = pEpPrev;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pEpCurr;

        if (pEpPrev)
            pEpPrev->AioMgr.pEndpointNext = pEpCurr;
        else
            pAioMgr->pEndpointsHead = pEpCurr;

        pEpCurr = pEpNextToSort;
    }

#ifdef DEBUG
    /* Validate sorting algorithm: list must be non-empty, doubly linked
     * consistently, in non-increasing load order, and contain all endpoints. */
    unsigned cEndpoints = 0;
    pEpCurr = pAioMgr->pEndpointsHead;

    AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
    AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));

    while (pEpCurr)
    {
        cEndpoints++;

        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;

        Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
        Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);

        pEpCurr = pNext;
    }

    AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));

#endif
}
#endif /* currently unused */
182
183/**
184 * Removes an endpoint from the currently assigned manager.
185 *
186 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
187 * FALSE otherwise.
188 * @param pEndpointRemove The endpoint to remove.
189 */
190static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
191{
192 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
193 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
194 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
195
196 pAioMgr->cEndpoints--;
197
198 if (pPrev)
199 pPrev->AioMgr.pEndpointNext = pNext;
200 else
201 pAioMgr->pEndpointsHead = pNext;
202
203 if (pNext)
204 pNext->AioMgr.pEndpointPrev = pPrev;
205
206 /* Make sure that there is no request pending on this manager for the endpoint. */
207 if (!pEndpointRemove->AioMgr.cRequestsActive)
208 {
209 Assert(!pEndpointRemove->pFlushReq);
210
211 /* Reopen the file so that the new endpoint can re-associate with the file */
212 RTFileClose(pEndpointRemove->hFile);
213 int rc = RTFileOpen(&pEndpointRemove->hFile, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
214 AssertRC(rc);
215 return false;
216 }
217
218 return true;
219}
220
221#if 0 /* currently unused */
222
223static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
224{
225 /* Balancing doesn't make sense with only one endpoint. */
226 if (pAioMgr->cEndpoints == 1)
227 return false;
228
229 /* Doesn't make sens to move endpoints if only one produces the whole load */
230 unsigned cEndpointsWithLoad = 0;
231
232 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
233
234 while (pCurr)
235 {
236 if (pCurr->AioMgr.cReqsPerSec)
237 cEndpointsWithLoad++;
238
239 pCurr = pCurr->AioMgr.pEndpointNext;
240 }
241
242 return (cEndpointsWithLoad > 1);
243}
244
245/**
246 * Creates a new I/O manager and spreads the I/O load of the endpoints
247 * between the given I/O manager and the new one.
248 *
249 * @returns nothing.
250 * @param pAioMgr The I/O manager with high I/O load.
251 */
252static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
253{
254 /*
255 * Check if balancing would improve the situation.
256 */
257 if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
258 {
259 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
260 PPDMACEPFILEMGR pAioMgrNew = NULL;
261
262 int rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
263 if (RT_SUCCESS(rc))
264 {
265 /* We will sort the list by request count per second. */
266 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
267
268 /* Now move some endpoints to the new manager. */
269 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
270 unsigned cReqsOther = 0;
271 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
272
273 while (pCurr)
274 {
275 if (cReqsHere <= cReqsOther)
276 {
277 /*
278 * The other manager has more requests to handle now.
279 * We will keep the current endpoint.
280 */
281 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
282 cReqsHere += pCurr->AioMgr.cReqsPerSec;
283 pCurr = pCurr->AioMgr.pEndpointNext;
284 }
285 else
286 {
287 /* Move to other endpoint. */
288 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
289 cReqsOther += pCurr->AioMgr.cReqsPerSec;
290
291 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
292
293 pCurr = pCurr->AioMgr.pEndpointNext;
294
295 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
296
297 if (fReqsPending)
298 {
299 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
300 pMove->AioMgr.fMoving = true;
301 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
302 }
303 else
304 {
305 pMove->AioMgr.fMoving = false;
306 pMove->AioMgr.pAioMgrDst = NULL;
307 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
308 }
309 }
310 }
311 }
312 else
313 {
314 /* Don't process further but leave a log entry about reduced performance. */
315 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
316 }
317 }
318 else
319 Log(("AIOMgr: Load balancing would not improve anything\n"));
320}
321
322#endif /* unused */
323
/**
 * Increase the maximum number of active requests for the given I/O manager.
 *
 * Grows cRequestsActiveMax by PDMACEPFILEMGR_REQS_STEP, replaces the AIO
 * context with a bigger one and enlarges the free request handle array.
 * Must only be called in the GROWING state with no requests in flight.
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager to grow.
 */
static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
{
    LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));

    AssertMsg(    pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING
              && !pAioMgr->cRequestsActive,
              ("Invalid state of the I/O manager\n"));

#ifdef RT_OS_WINDOWS
    /*
     * Reopen the files of all assigned endpoints first so we can assign them to the new
     * I/O context.
     */
    PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;

    while (pCurr)
    {
        RTFileClose(pCurr->hFile);
        int rc2 = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags); AssertRC(rc2);

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    /* Create the new bigger context. */
    pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;

    RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
    /* Prefer an unlimited context; retry with the (new) explicit limit if the host rejects it. */
    int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
    if (rc == VERR_OUT_OF_RANGE)
        rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);

    if (RT_SUCCESS(rc))
    {
        /* Close the old context. */
        rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
        AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

        pAioMgr->hAioCtx = hAioCtxNew;

        /* Create a new I/O task handle array */
        uint32_t cReqEntriesNew = pAioMgr->cRequestsActiveMax + 1;
        RTFILEAIOREQ *pahReqNew = (RTFILEAIOREQ *)RTMemAllocZ(cReqEntriesNew * sizeof(RTFILEAIOREQ));

        if (pahReqNew)
        {
            /* Copy the cached request handles. */
            for (uint32_t iReq = 0; iReq < pAioMgr->cReqEntries; iReq++)
                pahReqNew[iReq] = pAioMgr->pahReqsFree[iReq];

            RTMemFree(pAioMgr->pahReqsFree);
            pAioMgr->pahReqsFree = pahReqNew;
            pAioMgr->cReqEntries = cReqEntriesNew;
            LogFlowFunc(("I/O manager increased to handle a maximum of %u requests\n",
                         pAioMgr->cRequestsActiveMax));
        }
        else
            rc = VERR_NO_MEMORY;
    }

#ifdef RT_OS_WINDOWS
    /* Assign the file to the new context. */
    pCurr = pAioMgr->pEndpointsHead;
    while (pCurr)
    {
        rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->hFile);
        AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    if (RT_FAILURE(rc))
    {
        /* Roll back the limit increase so bookkeeping stays consistent. */
        LogFlow(("Increasing size of the I/O manager failed with rc=%Rrc\n", rc));
        pAioMgr->cRequestsActiveMax -= PDMACEPFILEMGR_REQS_STEP;
    }

    pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
    LogFlowFunc(("returns rc=%Rrc\n", rc));

    return rc;
}
413
414/**
415 * Checks if a given status code is fatal.
416 * Non fatal errors can be fixed by migrating the endpoint to a
417 * failsafe manager.
418 *
419 * @returns true If the error is fatal and migrating to a failsafe manager doesn't help
420 * false If the error can be fixed by a migration. (image on NFS disk for example)
421 * @param rcReq The status code to check.
422 */
423DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
424{
425 return rcReq == VERR_DEV_IO_ERROR
426 || rcReq == VERR_FILE_IO_ERROR
427 || rcReq == VERR_DISK_IO_ERROR
428 || rcReq == VERR_DISK_FULL
429 || rcReq == VERR_FILE_TOO_BIG;
430}
431
432/**
433 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
434 *
435 * @returns VBox status code
436 * @param pAioMgr The I/O manager the error occurred on.
437 * @param rc The error code.
438 * @param SRC_POS The source location of the error (use RT_SRC_POS).
439 */
440static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
441{
442 LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
443 pAioMgr, rc));
444 LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
445 LogRel(("AIOMgr: Please contact the product vendor\n"));
446
447 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
448
449 pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
450 ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);
451
452 AssertMsgFailed(("Implement\n"));
453 return VINF_SUCCESS;
454}
455
456/**
457 * Put a list of tasks in the pending request list of an endpoint.
458 */
459DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
460{
461 /* Add the rest of the tasks to the pending list */
462 if (!pEndpoint->AioMgr.pReqsPendingHead)
463 {
464 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
465 pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
466 }
467 else
468 {
469 Assert(pEndpoint->AioMgr.pReqsPendingTail);
470 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
471 }
472
473 /* Update the tail. */
474 while (pTaskHead->pNext)
475 pTaskHead = pTaskHead->pNext;
476
477 pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
478 pTaskHead->pNext = NULL;
479}
480
481/**
482 * Put one task in the pending request list of an endpoint.
483 */
484DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
485{
486 /* Add the rest of the tasks to the pending list */
487 if (!pEndpoint->AioMgr.pReqsPendingHead)
488 {
489 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
490 pEndpoint->AioMgr.pReqsPendingHead = pTask;
491 }
492 else
493 {
494 Assert(pEndpoint->AioMgr.pReqsPendingTail);
495 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
496 }
497
498 pEndpoint->AioMgr.pReqsPendingTail = pTask;
499 pTask->pNext = NULL;
500}
501
502/**
503 * Allocates a async I/O request.
504 *
505 * @returns Handle to the request.
506 * @param pAioMgr The I/O manager.
507 */
508static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
509{
510 /* Get a request handle. */
511 RTFILEAIOREQ hReq;
512 if (pAioMgr->iFreeEntry > 0)
513 {
514 pAioMgr->iFreeEntry--;
515 hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeEntry];
516 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = NIL_RTFILEAIOREQ;
517 Assert(hReq != NIL_RTFILEAIOREQ);
518 }
519 else
520 {
521 int rc = RTFileAioReqCreate(&hReq);
522 AssertRCReturn(rc, NIL_RTFILEAIOREQ);
523 }
524
525 return hReq;
526}
527
528/**
529 * Frees a async I/O request handle.
530 *
531 * @returns nothing.
532 * @param pAioMgr The I/O manager.
533 * @param hReq The I/O request handle to free.
534 */
535static void pdmacFileAioMgrNormalRequestFree(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
536{
537 Assert(pAioMgr->iFreeEntry < pAioMgr->cReqEntries);
538 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] == NIL_RTFILEAIOREQ);
539
540 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = hReq;
541 pAioMgr->iFreeEntry++;
542}
543
/**
 * Wrapper around RTFileAioCtxSubmit() which is also doing error handling.
 *
 * Accounts the requests as active up front; on submission failure the
 * not-submitted requests are either re-queued on the endpoint (resource
 * shortage) or completed/resubmitted individually (other errors).
 *
 * @returns VINF_SUCCESS (errors are handled internally).
 * @param   pAioMgr      The I/O manager.
 * @param   pEndpoint    The endpoint the requests belong to.
 * @param   pahReqs      Array of prepared request handles to submit.
 * @param   cReqs        Number of requests in the array.
 */
static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
                                            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                            PRTFILEAIOREQ pahReqs, unsigned cReqs)
{
    pAioMgr->cRequestsActive += cReqs;
    pEndpoint->AioMgr.cRequestsActive += cReqs;

    LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
    LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));

    int rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
        {
            PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;

            /* Append any not submitted task to the waiting list. */
            for (size_t i = 0; i < cReqs; i++)
            {
                int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);

                /* VERR_FILE_AIO_IN_PROGRESS means the request made it in; everything else didn't. */
                if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
                {
                    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);

                    Assert(pTask->hReq == pahReqs[i]);
                    pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
                    pAioMgr->cRequestsActive--;
                    pEndpoint->AioMgr.cRequestsActive--;

                    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
                    {
                        /* Clear the pending flush */
                        Assert(pEndpoint->pFlushReq == pTask);
                        pEndpoint->pFlushReq = NULL;
                    }
                }
            }

            /* The host just told us its limit: clamp the maximum to what actually went through. */
            pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;

            /* Print an entry in the release log */
            if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
            {
                pEpClass->fOutOfResourcesWarningPrinted = true;
                LogRel(("AIOMgr: Host limits number of active IO requests to %u. Expect a performance impact.\n",
                        pAioMgr->cRequestsActive));
            }

            LogFlow(("Removed requests. I/O manager has a total of %u active requests now\n", pAioMgr->cRequestsActive));
            LogFlow(("Endpoint has a total of %u active requests now\n", pEndpoint->AioMgr.cRequestsActive));
            rc = VINF_SUCCESS;
        }
        else /* Another kind of error happened (full disk, ...) */
        {
            /* An error happened. Find out which one caused the error and resubmit all other tasks. */
            for (size_t i = 0; i < cReqs; i++)
            {
                int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);

                if (rcReq == VERR_FILE_AIO_NOT_SUBMITTED)
                {
                    /* We call ourself again to do any error handling which might come up now. */
                    rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &pahReqs[i], 1);
                    AssertRC(rc);
                }
                else if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
                    pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, pahReqs[i], rcReq, 0);
            }


            if (   pEndpoint->pFlushReq
                && !pAioMgr->cRequestsActive
                && !pEndpoint->fAsyncFlushSupported)
            {
                /*
                 * Complete a pending flush if we don't have requests enqueued and the host doesn't support
                 * the async flush API.
                 * Happens only if this we just noticed that this is not supported
                 * and the only active request was a flush.
                 */
                PPDMACTASKFILE pFlush = pEndpoint->pFlushReq;
                pEndpoint->pFlushReq = NULL;
                pFlush->pfnCompleted(pFlush, pFlush->pvUser, VINF_SUCCESS);
                pdmacFileTaskFree(pEndpoint, pFlush);
            }
        }
    }

    return VINF_SUCCESS;
}
639
/**
 * Checks whether the given file range is currently locked by an active
 * (unaligned) request and, if so, queues the task behind that lock.
 *
 * @returns true if the task was deferred onto an existing range lock's
 *          waiting list, false if the range is free and the task may proceed.
 * @param   pEndpoint     The endpoint the task belongs to.
 * @param   offStart      Start offset of the range to check.
 * @param   cbRange       Size of the range in bytes.
 * @param   pTask         The read or write task (queued on collision).
 * @param   fAlignedReq   Whether the request is fully 512-byte aligned.
 */
static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                               RTFOFF offStart, size_t cbRange,
                                               PPDMACTASKFILE pTask, bool fAlignedReq)
{
    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
              ("Invalid task type %d\n", pTask->enmTransferType));

    /*
     * If there is no unaligned request active and the current one is aligned
     * just pass it through.
     */
    if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
        return false;

    /* First try an exact-start match, then the closest lock below/at offStart. */
    PPDMACFILERANGELOCK pRangeLock;
    pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
    if (!pRangeLock)
    {
        pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
        /* Check if we intersect with the range. */
        if (   !pRangeLock
            || !(   (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
                 && (pRangeLock->Core.KeyLast) >= offStart))
        {
            pRangeLock = NULL; /* False alarm */
        }
    }

    /* Check whether we have one of the situations explained below */
    if (pRangeLock)
    {
        /* Add to the list - the task runs when the lock is released. */
        pTask->pNext = NULL;

        if (!pRangeLock->pWaitingTasksHead)
        {
            Assert(!pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksHead = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        else
        {
            AssertPtr(pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksTail->pNext = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        return true;
    }

    return false;
}
692
/**
 * Locks a file range for the given task.
 *
 * Aligned requests with no unaligned request active skip locking entirely.
 * Otherwise a range lock record is allocated from the memcache and inserted
 * into the endpoint's AVL tree of locked ranges.
 *
 * @returns VBox status code (VERR_NO_MEMORY if the lock record allocation fails).
 * @param   pAioMgr       The I/O manager (owns the range lock memcache).
 * @param   pEndpoint     The endpoint whose range tree is updated.
 * @param   offStart      Start offset of the range to lock.
 * @param   cbRange       Size of the range in bytes.
 * @param   pTask         The task taking the lock.
 * @param   fAlignedReq   Whether the request is fully 512-byte aligned.
 */
static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
                                          PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                          RTFOFF offStart, size_t cbRange,
                                          PPDMACTASKFILE pTask, bool fAlignedReq)
{
    LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p offStart=%RTfoff cbRange=%zu pTask=%#p\n",
                 pAioMgr, pEndpoint, offStart, cbRange, pTask));

    /* Caller must have deferred the task already if the range collides. */
    AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask, fAlignedReq),
              ("Range is already locked offStart=%RTfoff cbRange=%u\n",
               offStart, cbRange));

    /*
     * If there is no unaligned request active and the current one is aligned
     * just don't use the lock.
     */
    if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
    {
        pTask->pRangeLock = NULL;
        return VINF_SUCCESS;
    }

    PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
    if (!pRangeLock)
        return VERR_NO_MEMORY;

    /* Init the lock. */
    pRangeLock->Core.Key          = offStart;
    pRangeLock->Core.KeyLast      = offStart + cbRange - 1;
    pRangeLock->cRefs             = 1;
    pRangeLock->fReadLock         = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
    pRangeLock->pWaitingTasksHead = NULL;
    pRangeLock->pWaitingTasksTail = NULL;

    bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
    AssertMsg(fInserted, ("Range lock was not inserted!\n")); NOREF(fInserted);

    /* Let the task point to its lock. */
    pTask->pRangeLock = pRangeLock;
    pEndpoint->AioMgr.cLockedReqsActive++;

    return VINF_SUCCESS;
}
736
737static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
738 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
739 PPDMACFILERANGELOCK pRangeLock)
740{
741 PPDMACTASKFILE pTasksWaitingHead;
742
743 LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p pRangeLock=%#p\n",
744 pAioMgr, pEndpoint, pRangeLock));
745
746 /* pRangeLock can be NULL if there was no lock assigned with the task. */
747 if (!pRangeLock)
748 return NULL;
749
750 Assert(pRangeLock->cRefs == 1);
751
752 RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
753 pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
754 pRangeLock->pWaitingTasksHead = NULL;
755 pRangeLock->pWaitingTasksTail = NULL;
756 RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
757 pEndpoint->AioMgr.cLockedReqsActive--;
758
759 return pTasksWaitingHead;
760}
761
762static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
763 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
764 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
765{
766 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
767 || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
768 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
769 pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
770
771 pTask->fPrefetch = false;
772 pTask->cbBounceBuffer = 0;
773
774 /*
775 * Before we start to setup the request we have to check whether there is a task
776 * already active which range intersects with ours. We have to defer execution
777 * of this task in two cases:
778 * - The pending task is a write and the current is either read or write
779 * - The pending task is a read and the current task is a write task.
780 *
781 * To check whether a range is currently "locked" we use the AVL tree where every pending task
782 * is stored by its file offset range. The current task will be added to the active task
783 * and will be executed when the active one completes. (The method below
784 * which checks whether a range is already used will add the task)
785 *
786 * This is necessary because of the requirement to align all requests to a 512 boundary
787 * which is enforced by the host OS (Linux and Windows atm). It is possible that
788 * we have to process unaligned tasks and need to align them using bounce buffers.
789 * While the data is fetched from the file another request might arrive writing to
790 * the same range. This will result in data corruption if both are executed concurrently.
791 */
792 int rc = VINF_SUCCESS;
793 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask,
794 true /* fAlignedReq */);
795 if (!fLocked)
796 {
797 /* Get a request handle. */
798 RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
799 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
800
801 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
802 {
803 /* Grow the file if needed. */
804 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
805 {
806 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
807 RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
808 }
809
810 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
811 pTask->Off, pTask->DataSeg.pvSeg,
812 pTask->DataSeg.cbSeg, pTask);
813 }
814 else
815 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile,
816 pTask->Off, pTask->DataSeg.pvSeg,
817 pTask->DataSeg.cbSeg, pTask);
818 AssertRC(rc);
819
820 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
821 pTask->DataSeg.cbSeg,
822 pTask, true /* fAlignedReq */);
823
824 if (RT_SUCCESS(rc))
825 {
826 pTask->hReq = hReq;
827 *phReq = hReq;
828 }
829 }
830 else
831 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
832
833 return rc;
834}
835
836static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
837 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
838 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
839{
840 /*
841 * Check if the alignment requirements are met.
842 * Offset, transfer size and buffer address
843 * need to be on a 512 boundary.
844 */
845 RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
846 size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
847 PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;
848 bool fAlignedReq = cbToTransfer == pTask->DataSeg.cbSeg
849 && offStart == pTask->Off;
850
851 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
852 || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
853 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
854 offStart, cbToTransfer, pEndpoint->cbFile));
855
856 pTask->fPrefetch = false;
857
858 /*
859 * Before we start to setup the request we have to check whether there is a task
860 * already active which range intersects with ours. We have to defer execution
861 * of this task in two cases:
862 * - The pending task is a write and the current is either read or write
863 * - The pending task is a read and the current task is a write task.
864 *
865 * To check whether a range is currently "locked" we use the AVL tree where every pending task
866 * is stored by its file offset range. The current task will be added to the active task
867 * and will be executed when the active one completes. (The method below
868 * which checks whether a range is already used will add the task)
869 *
870 * This is necessary because of the requirement to align all requests to a 512 boundary
871 * which is enforced by the host OS (Linux and Windows atm). It is possible that
872 * we have to process unaligned tasks and need to align them using bounce buffers.
873 * While the data is fetched from the file another request might arrive writing to
874 * the same range. This will result in data corruption if both are executed concurrently.
875 */
876 int rc = VINF_SUCCESS;
877 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
878 if (!fLocked)
879 {
880 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
881 void *pvBuf = pTask->DataSeg.pvSeg;
882
883 /* Get a request handle. */
884 RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
885 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
886
887 if ( !fAlignedReq
888 || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
889 {
890 LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
891 pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));
892
893 /* Create bounce buffer. */
894 pTask->cbBounceBuffer = cbToTransfer;
895
896 AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
897 pTask->Off, offStart));
898 pTask->offBounceBuffer = pTask->Off - offStart;
899
900 /** @todo I think we need something like a RTMemAllocAligned method here.
901 * Current assumption is that the maximum alignment is 4096byte
902 * (GPT disk on Windows)
903 * so we can use RTMemPageAlloc here.
904 */
905 pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
906 if (RT_LIKELY(pTask->pvBounceBuffer))
907 {
908 pvBuf = pTask->pvBounceBuffer;
909
910 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
911 {
912 if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
913 || RT_UNLIKELY(offStart != pTask->Off))
914 {
915 /* We have to fill the buffer first before we can update the data. */
916 LogFlow(("Prefetching data for task %#p\n", pTask));
917 pTask->fPrefetch = true;
918 enmTransferType = PDMACTASKFILETRANSFER_READ;
919 }
920 else
921 memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
922 }
923 }
924 else
925 rc = VERR_NO_MEMORY;
926 }
927 else
928 pTask->cbBounceBuffer = 0;
929
930 if (RT_SUCCESS(rc))
931 {
932 AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
933 ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));
934
935 if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
936 {
937 /* Grow the file if needed. */
938 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
939 {
940 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
941 RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
942 }
943
944 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
945 offStart, pvBuf, cbToTransfer, pTask);
946 }
947 else
948 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile,
949 offStart, pvBuf, cbToTransfer, pTask);
950 AssertRC(rc);
951
952 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
953 if (RT_SUCCESS(rc))
954 {
955 pTask->hReq = hReq;
956 *phReq = hReq;
957 }
958 else
959 {
960 /* Cleanup */
961 if (pTask->cbBounceBuffer)
962 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
963 }
964 }
965 }
966 else
967 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
968
969 return rc;
970}
971
/**
 * Processes a linked list of tasks for the given endpoint, preparing host AIO
 * requests and submitting them in batches until a flush request is pending,
 * the manager's active-request limit is reached, the endpoint's bandwidth
 * limit kicks in, or an error occurs.  Unprocessed tasks are re-queued on the
 * endpoint's pending list.
 *
 * @returns VBox status code (VERR_FILE_AIO_INSUFFICIENT_RESSOURCES is
 *          converted to VINF_SUCCESS as it is non-fatal).
 * @param   pTaskHead   Head of the singly linked task list to process.
 * @param   pAioMgr     The I/O manager instance the endpoint is assigned to.
 * @param   pEndpoint   The endpoint the tasks belong to; must be ACTIVE.
 */
static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
                                                PPDMACEPFILEMGR pAioMgr,
                                                PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    RTFILEAIOREQ apReqs[20];               /* Submission batch; flushed when full. */
    unsigned cRequests = 0;                /* Number of requests collected in apReqs. */
    int rc = VINF_SUCCESS;

    AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
              ("Trying to process request lists of a non active endpoint!\n"));

    /* Go through the list and queue the requests until we get a flush request */
    while (   pTaskHead
           && !pEndpoint->pFlushReq
           && (pAioMgr->cRequestsActive + cRequests < pAioMgr->cRequestsActiveMax)
           && RT_SUCCESS(rc))
    {
        RTMSINTERVAL msWhenNext;
        PPDMACTASKFILE pCurr = pTaskHead;

        /* Bandwidth limiting: stop here and remember the earliest retry time. */
        if (!pdmacEpIsTransferAllowed(&pEndpoint->Core, (uint32_t)pCurr->DataSeg.cbSeg, &msWhenNext))
        {
            pAioMgr->msBwLimitExpired = RT_MIN(pAioMgr->msBwLimitExpired, msWhenNext);
            break;
        }

        /* Unlink the current task before handing it to the per-type handling below. */
        pTaskHead = pTaskHead->pNext;

        pCurr->pNext = NULL;

        AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
                  ("Endpoints do not match\n"));

        switch (pCurr->enmTransferType)
        {
            case PDMACTASKFILETRANSFER_FLUSH:
            {
                /* If there is no data transfer request this flush request finished immediately. */
                if (pEndpoint->fAsyncFlushSupported)
                {
                    /* Issue a flush to the host. */
                    RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
                    AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));

                    LogFlow(("Flush request %#p\n", hReq));

                    rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->hFile, pCurr);
                    if (RT_FAILURE(rc))
                    {
                        /* Async flush not possible; fall back to the synchronous
                         * completion path below and never try again. */
                        if (rc == VERR_NOT_SUPPORTED)
                            LogRel(("AIOMgr: Async flushes not supported\n"));
                        else
                            LogRel(("AIOMgr: Preparing flush failed with %Rrc, disabling async flushes\n", rc));
                        pEndpoint->fAsyncFlushSupported = false;
                        pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
                        rc = VINF_SUCCESS; /* Fake success */
                    }
                    else
                    {
                        pCurr->hReq = hReq;
                        apReqs[cRequests] = hReq;
                        pEndpoint->AioMgr.cReqsProcessed++;
                        cRequests++;
                    }
                }

                if (   !pEndpoint->AioMgr.cRequestsActive
                    && !pEndpoint->fAsyncFlushSupported)
                {
                    /* Nothing in flight and no async flush: complete immediately. */
                    pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
                    pdmacFileTaskFree(pEndpoint, pCurr);
                }
                else
                {
                    /* Park the flush; it completes once outstanding requests drain
                     * (or when the async flush request itself completes). */
                    Assert(!pEndpoint->pFlushReq);
                    pEndpoint->pFlushReq = pCurr;
                }
                break;
            }
            case PDMACTASKFILETRANSFER_READ:
            case PDMACTASKFILETRANSFER_WRITE:
            {
                RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;

                if (pCurr->hReq == NIL_RTFILEAIOREQ)
                {
                    /* Prepare a fresh request; hReq stays NIL if the task was
                     * deferred (e.g. range locked) - see the prepare helpers. */
                    if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
                        rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
                    else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
                        rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
                    else
                        AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));

                    AssertRC(rc);
                }
                else
                {
                    /* Task was prepared earlier (e.g. a resubmission). */
                    LogFlow(("Task %#p has I/O request %#p already\n", pCurr, pCurr->hReq));
                    hReq = pCurr->hReq;
                }

                LogFlow(("Read/Write request %#p\n", hReq));

                if (hReq != NIL_RTFILEAIOREQ)
                {
                    apReqs[cRequests] = hReq;
                    cRequests++;
                }
                break;
            }
            default:
                AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
        } /* switch transfer type */

        /* Queue the requests if the array is full. */
        if (cRequests == RT_ELEMENTS(apReqs))
        {
            rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
            cRequests = 0;
            AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                      ("Unexpected return code\n"));
        }
    }

    /* Submit any partially filled batch. */
    if (cRequests)
    {
        rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
        AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                  ("Unexpected return code rc=%Rrc\n", rc));
    }

    if (pTaskHead)
    {
        /* Add the rest of the tasks to the pending list */
        pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);

        if (RT_UNLIKELY(   pAioMgr->cRequestsActiveMax == pAioMgr->cRequestsActive
                        && !pEndpoint->pFlushReq))
        {
#if 0
            /*
             * The I/O manager has no room left for more requests
             * but there are still requests to process.
             * Create a new I/O manager and let it handle some endpoints.
             */
            pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
#else
            /* Grow the I/O manager */
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_GROWING;
#endif
        }
    }

    /* Insufficient resources are not fatal. */
    if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
        rc = VINF_SUCCESS;

    return rc;
}
1131
1132/**
1133 * Adds all pending requests for the given endpoint
1134 * until a flush request is encountered or there is no
1135 * request anymore.
1136 *
1137 * @returns VBox status code.
1138 * @param pAioMgr The async I/O manager for the endpoint
1139 * @param pEndpoint The endpoint to get the requests from.
1140 */
1141static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
1142 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
1143{
1144 int rc = VINF_SUCCESS;
1145 PPDMACTASKFILE pTasksHead = NULL;
1146
1147 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
1148 ("Trying to process request lists of a non active endpoint!\n"));
1149
1150 Assert(!pEndpoint->pFlushReq);
1151
1152 /* Check the pending list first */
1153 if (pEndpoint->AioMgr.pReqsPendingHead)
1154 {
1155 LogFlow(("Queuing pending requests first\n"));
1156
1157 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
1158 /*
1159 * Clear the list as the processing routine will insert them into the list
1160 * again if it gets a flush request.
1161 */
1162 pEndpoint->AioMgr.pReqsPendingHead = NULL;
1163 pEndpoint->AioMgr.pReqsPendingTail = NULL;
1164 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1165 AssertRC(rc); /** @todo r=bird: status code potentially overwritten. */
1166 }
1167
1168 if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
1169 {
1170 /* Now the request queue. */
1171 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
1172 if (pTasksHead)
1173 {
1174 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1175 AssertRC(rc);
1176 }
1177 }
1178
1179 return rc;
1180}
1181
/**
 * Processes the blocking event another thread posted to this I/O manager
 * (add/remove/close endpoint, shutdown, suspend, resume) and signals the
 * waiting thread once the event is fully handled.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager instance; fBlockingEventPending must
 *                      be set by the poster before waking this thread.
 */
static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    bool fNotifyWaiter = false;   /* Set when the event is complete and the poster can be released. */

    LogFlowFunc((": Enter\n"));

    Assert(pAioMgr->fBlockingEventPending);

    switch (pAioMgr->enmBlockingEvent)
    {
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without a endpoint to add\n"));

            pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;

            /* Link the endpoint at the head of the manager's doubly linked endpoint list. */
            pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
            pEndpointNew->AioMgr.pEndpointPrev = NULL;
            if (pAioMgr->pEndpointsHead)
                pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
            pAioMgr->pEndpointsHead = pEndpointNew;

            /* Assign the completion point to this file. */
            rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->hFile);
            fNotifyWaiter = true;
            pAioMgr->cEndpoints++;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

            /* If requests are still active the removal (and the notification)
             * is delayed until the last of them completes. */
            pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without a endpoint to close\n"));

            if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
            {
                LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));

                /* Make sure all tasks finished. Process the queues a last time first. */
                rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
                AssertRC(rc);

                pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
                fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
            }
            else if (   (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
                     && (!pEndpointClose->AioMgr.cRequestsActive))
                fNotifyWaiter = true;   /* Already closing and drained: done. */
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
            /* With requests in flight the notification happens when they drain. */
            if (!pAioMgr->cRequestsActive)
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
        {
            /* No notification here; the suspend completes asynchronously. */
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
            fNotifyWaiter = true;
            break;
        }
        default:
            AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
    }

    if (fNotifyWaiter)
    {
        /* Clear the pending flag before signalling so the poster sees a consistent state. */
        ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
        pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;

        /* Release the waiting thread. */
        LogFlow(("Signalling waiter\n"));
        rc = RTSemEventSignal(pAioMgr->EventSemBlock);
        AssertRC(rc);
    }

    LogFlowFunc((": Leave\n"));
    return rc;
}
1278
1279/**
1280 * Checks all endpoints for pending events or new requests.
1281 *
1282 * @returns VBox status code.
1283 * @param pAioMgr The I/O manager handle.
1284 */
1285static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
1286{
1287 /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
1288 int rc = VINF_SUCCESS;
1289 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
1290
1291 pAioMgr->msBwLimitExpired = RT_INDEFINITE_WAIT;
1292
1293 while (pEndpoint)
1294 {
1295 if (!pEndpoint->pFlushReq
1296 && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1297 && !pEndpoint->AioMgr.fMoving)
1298 {
1299 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302 }
1303 else if ( !pEndpoint->AioMgr.cRequestsActive
1304 && pEndpoint->enmState != PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1305 {
1306 /* Reopen the file so that the new endpoint can re-associate with the file */
1307 RTFileClose(pEndpoint->hFile);
1308 rc = RTFileOpen(&pEndpoint->hFile, pEndpoint->Core.pszUri, pEndpoint->fFlags);
1309 AssertRC(rc);
1310
1311 if (pEndpoint->AioMgr.fMoving)
1312 {
1313 pEndpoint->AioMgr.fMoving = false;
1314 pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1315 }
1316 else
1317 {
1318 Assert(pAioMgr->fBlockingEventPending);
1319 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1320
1321 /* Release the waiting thread. */
1322 LogFlow(("Signalling waiter\n"));
1323 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1324 AssertRC(rc);
1325 }
1326 }
1327
1328 pEndpoint = pEndpoint->AioMgr.pEndpointNext;
1329 }
1330
1331 return rc;
1332}
1333
1334/**
1335 * Wrapper around pdmacFileAioMgrNormalReqCompleteRc().
1336 */
1337static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
1338{
1339 size_t cbTransfered = 0;
1340 int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
1341
1342 pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, hReq, rcReq, cbTransfered);
1343}
1344
/**
 * Completion worker for a single host AIO request: updates request counters,
 * handles failures (possibly migrating the endpoint to the failsafe manager),
 * restarts short/incomplete transfers, turns completed prefetch reads into
 * the real write, copies bounce-buffer data back and finally notifies the
 * task owner via its completion callback.
 *
 * @param   pAioMgr       The I/O manager the request was submitted on.
 * @param   hReq          The completed IPRT AIO request handle.
 * @param   rcReq         Status code the request completed with.
 * @param   cbTransfered  Number of bytes actually transferred by this request.
 */
static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
                                               int rcReq, size_t cbTransfered)
{
    int rc = VINF_SUCCESS;
    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
    PPDMACTASKFILE pTasksWaiting;

    LogFlowFunc(("pAioMgr=%#p hReq=%#p\n", pAioMgr, hReq));

    pEndpoint = pTask->pEndpoint;

    /* The task no longer owns the request handle. */
    pTask->hReq = NIL_RTFILEAIOREQ;

    /* Bookkeeping: one request less in flight, one more processed. */
    pAioMgr->cRequestsActive--;
    pEndpoint->AioMgr.cRequestsActive--;
    pEndpoint->AioMgr.cReqsProcessed++;

    /*
     * It is possible that the request failed on Linux with kernels < 2.6.23
     * if the passed buffer was allocated with remap_pfn_range or if the file
     * is on an NFS endpoint which does not support async and direct I/O at the same time.
     * The endpoint will be migrated to a failsafe manager in case a request fails.
     */
    if (RT_FAILURE(rcReq))
    {
        /* Free bounce buffers and the IPRT request. */
        pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            /* A failed async flush disables async flushes; the task itself is
             * reported as success because the synchronous path takes over. */
            LogRel(("AIOMgr: Flush failed with %Rrc, disabling async flushes\n", rcReq));
            pEndpoint->fAsyncFlushSupported = false;
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
            /* The other method will take over now. */

            pEndpoint->pFlushReq = NULL;
            /* Call completion callback */
            LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, VINF_SUCCESS));
            pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
            pdmacFileTaskFree(pEndpoint, pTask);
        }
        else
        {
            /* Free the lock and process pending tasks if necessary */
            pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
            rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
            AssertRC(rc);

            if (pTask->cbBounceBuffer)
                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);

            /*
             * Fatal errors are reported to the guest and non-fatal errors
             * will cause a migration to the failsafe manager in the hope
             * that the error disappears.
             */
            if (!pdmacFileAioMgrNormalRcIsFatal(rcReq))
            {
                /* Queue the request on the pending list. */
                pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
                pEndpoint->AioMgr.pReqsPendingHead = pTask;

                /* Create a new failsafe manager if necessary. */
                if (!pEndpoint->AioMgr.fMoving)
                {
                    PPDMACEPFILEMGR pAioMgrFailsafe;

                    LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
                            RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));

                    pEndpoint->AioMgr.fMoving = true;

                    rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
                                               &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
                    AssertRC(rc);

                    pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;

                    /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
                    pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
                }

                /* If this was the last request for the endpoint migrate it to the new manager. */
                if (!pEndpoint->AioMgr.cRequestsActive)
                {
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending); NOREF(fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
            else
            {
                /* Fatal error: report it straight to the task owner. */
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);
            }
        }
    }
    else
    {
        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            /* Clear pending flush */
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Completed flush request doesn't match active one\n"));
            pEndpoint->pFlushReq = NULL;
            pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

            /* Call completion callback */
            LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, rcReq));
            pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
            pdmacFileTaskFree(pEndpoint, pTask);
        }
        else
        {
            /*
             * Restart an incomplete transfer.
             * This usually means that the request will return an error now
             * but to get the cause of the error (disk full, file too big, I/O error, ...)
             * the transfer needs to be continued.
             */
            pTask->cbTransfered += cbTransfered;

            if (RT_UNLIKELY(   pTask->cbTransfered < pTask->DataSeg.cbSeg
                            || (   pTask->cbBounceBuffer
                                && pTask->cbTransfered < pTask->cbBounceBuffer)))
            {
                RTFOFF offStart;
                size_t cbToTransfer;
                uint8_t *pbBuf = NULL;

                LogFlow(("Restarting incomplete transfer %#p (%zu bytes transferred)\n",
                         pTask, cbTransfered));
                Assert(cbTransfered % 512 == 0);

                if (pTask->cbBounceBuffer)
                {
                    /* Continue inside the (512 byte aligned) bounce buffer. */
                    AssertPtr(pTask->pvBounceBuffer);
                    offStart = (pTask->Off & ~((RTFOFF)512-1)) + pTask->cbTransfered;
                    cbToTransfer = pTask->cbBounceBuffer - pTask->cbTransfered;
                    pbBuf = (uint8_t *)pTask->pvBounceBuffer + pTask->cbTransfered;
                }
                else
                {
                    /* Continue directly in the caller's segment. */
                    Assert(!pTask->pvBounceBuffer);
                    offStart = pTask->Off + pTask->cbTransfered;
                    cbToTransfer = pTask->DataSeg.cbSeg - pTask->cbTransfered;
                    pbBuf = (uint8_t *)pTask->DataSeg.pvSeg + pTask->cbTransfered;
                }

                if (pTask->fPrefetch || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                {
                    rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile, offStart,
                                                 pbBuf, cbToTransfer, pTask);
                }
                else
                {
                    AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE,
                              ("Invalid transfer type\n"));
                    rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile, offStart,
                                                  pbBuf, cbToTransfer, pTask);
                }
                AssertRC(rc);

                /* Resubmit the continuation request immediately. */
                pTask->hReq = hReq;
                rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
                AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                          ("Unexpected return code rc=%Rrc\n", rc));
            }
            else if (pTask->fPrefetch)
            {
                /* The prefetch read for an unaligned write finished: merge the
                 * caller's data into the bounce buffer and issue the real write. */
                Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
                Assert(pTask->cbBounceBuffer);

                memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                       pTask->DataSeg.pvSeg,
                       pTask->DataSeg.cbSeg);

                /* Write it now. */
                pTask->fPrefetch = false;
                RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
                size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);

                pTask->cbTransfered = 0;

                /* Grow the file if needed. */
                if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                {
                    ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                    RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
                }

                rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
                                              offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
                AssertRC(rc);
                pTask->hReq = hReq;
                rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
                AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                          ("Unexpected return code rc=%Rrc\n", rc));
            }
            else
            {
                /* Transfer fully done: copy read data out of the bounce buffer
                 * (if one was used) and free it. */
                if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
                {
                    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                        memcpy(pTask->DataSeg.pvSeg,
                               ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                               pTask->DataSeg.cbSeg);

                    RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
                }

                pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

                /* Free the lock and process pending tasks if necessary */
                pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
                if (pTasksWaiting)
                {
                    rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
                    AssertRC(rc);
                }

                /* Call completion callback */
                LogFlow(("Task=%#p completed with %Rrc\n", pTask, rcReq));
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);

                /*
                 * If there is no request left on the endpoint but a flush request is set
                 * it completed now and we notify the owner.
                 * Furthermore we look for new requests and continue.
                 */
                if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
                {
                    /* Call completion callback */
                    pTask = pEndpoint->pFlushReq;
                    pEndpoint->pFlushReq = NULL;

                    AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));

                    pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
                    pdmacFileTaskFree(pEndpoint, pTask);
                }
                else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
                {
                    /* If the endpoint is about to be migrated do it now. */
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending); NOREF(fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
        } /* Not a flush request */
    } /* request completed successfully */
}
1602
/** Helper macro for checking for error codes: on failure it runs the error
 *  handler and returns its status.  Wrapped in do { } while (0) so the macro
 *  expands to exactly one statement, avoiding the dangling-else hazard the
 *  bare-if form has when used as `if (cond) CHECK_RC(...); else ...`. */
#define CHECK_RC(pAioMgr, rc) \
    do \
    { \
        if (RT_FAILURE(rc)) \
        { \
            int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS); \
            return rc2; \
        } \
    } while (0)
1610
/**
 * The normal I/O manager using the RTFileAio* API
 *
 * Thread loop: sleeps while idle, processes blocking events posted by other
 * threads, queues new endpoint requests, waits for host AIO completions,
 * updates per-endpoint load statistics and grows the manager on demand.
 *
 * @returns VBox status code.
 * @param   hThreadSelf    Handle of the thread.
 * @param   pvUser         Opaque user data (PPDMACEPFILEMGR).
 */
DECLCALLBACK(int) pdmacFileAioMgrNormal(RTTHREAD hThreadSelf, void *pvUser)
{
    int rc = VINF_SUCCESS;
    PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
    uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
    NOREF(hThreadSelf);

    while (   pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
           || pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING
           || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
    {
        /* Idle: sleep until woken up or until the bandwidth-limit timeout expires. */
        if (!pAioMgr->cRequestsActive)
        {
            ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
            if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
                rc = RTSemEventWait(pAioMgr->EventSem, pAioMgr->msBwLimitExpired);
            ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
            Assert(RT_SUCCESS(rc) || rc == VERR_TIMEOUT);

            LogFlow(("Got woken up\n"));
            ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
        }

        /* Check for an external blocking event first. */
        if (pAioMgr->fBlockingEventPending)
        {
            rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
            CHECK_RC(pAioMgr, rc);
        }

        if (RT_LIKELY(   pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
                      || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
        {
            /* We got woken up because an endpoint issued new requests. Queue them. */
            rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
            CHECK_RC(pAioMgr, rc);

            while (pAioMgr->cRequestsActive)
            {
                RTFILEAIOREQ apReqs[20];
                uint32_t cReqsCompleted = 0;
                size_t cReqsWait;

                /* Never ask for more completions than the array can hold. */
                if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
                    cReqsWait = RT_ELEMENTS(apReqs);
                else
                    cReqsWait = pAioMgr->cRequestsActive;

                LogFlow(("Waiting for %d of %d tasks to complete\n", 1, cReqsWait));

                /* Block until at least one request completed. */
                rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
                                      1,
                                      RT_INDEFINITE_WAIT, apReqs,
                                      cReqsWait, &cReqsCompleted);
                if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
                    CHECK_RC(pAioMgr, rc);

                LogFlow(("%d tasks completed\n", cReqsCompleted));

                for (uint32_t i = 0; i < cReqsCompleted; i++)
                    pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);

                /* Check for an external blocking event before we go to sleep again. */
                if (pAioMgr->fBlockingEventPending)
                {
                    rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
                    CHECK_RC(pAioMgr, rc);
                }

                /* Update load statistics. */
                uint64_t uMillisCurr = RTTimeMilliTS();
                if (uMillisCurr > uMillisEnd)
                {
                    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;

                    /* Calculate timespan. */
                    uMillisCurr -= uMillisEnd;

                    while (pEndpointCurr)
                    {
                        pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
                        pEndpointCurr->AioMgr.cReqsProcessed = 0;
                        pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
                    }

                    /* Set new update interval */
                    uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
                }

                /* Check endpoints for new requests. */
                if (pAioMgr->enmState != PDMACEPFILEMGRSTATE_GROWING)
                {
                    rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
                    CHECK_RC(pAioMgr, rc);
                }
            } /* while requests are active. */

            /* All requests drained; grow the context now if that was requested. */
            if (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
            {
                rc = pdmacFileAioMgrNormalGrow(pAioMgr);
                AssertRC(rc);
                Assert(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING);

                rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
                CHECK_RC(pAioMgr, rc);
            }
        } /* if still running */
    } /* while running */

    LogFlowFunc(("rc=%Rrc\n", rc));
    return rc;
}
1730
1731#undef CHECK_RC
1732
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette