
source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileCache.cpp @ 24442

Last change on this file since 24442 was 24415, checked in by vboxsync, 15 years ago

AsyncCompletion: Release the write lock when done. Should fix guest hangs

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 64.1 KB
1/* $Id: PDMAsyncCompletionFileCache.cpp 24415 2009-11-05 21:07:56Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronously in R3 using EMT.
4 * File data cache.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22
23/** @page pg_pdm_async_completion_cache PDM Async Completion Cache - The file I/O cache
24 * This component implements an I/O cache for file endpoints based on the ARC algorithm.
25 * http://en.wikipedia.org/wiki/Adaptive_Replacement_Cache
26 *
27 * The algorithm uses four LRU (Least Recently Used) lists to store data in the cache.
28 * Two of them contain the actual data: one holds entries which were accessed only recently
29 * and one holds frequently accessed data.
30 * The other two lists are called ghost lists. They store information about the accessed range
31 * but do not contain data and are only used to track accesses. If such an entry is accessed
32 * again, the data is pushed to a higher position in the cache, preventing it from being
33 * removed again quickly.
34 *
35 * The algorithm is modified to meet our requirements. Like the implementation
36 * for the ZFS filesystem we need to handle pages with a variable size. It would
37 * be possible to use a fixed size, but this would increase the computational
38 * and memory overhead.
39 * Because we do I/O asynchronously we also need to mark entries which are currently accessed
40 * as non-evictable to prevent removal of an entry while its data is being accessed.
41 */
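/*
 * Illustrative overview (a conceptual aid only, using the PDMACFILECACHEGLOBAL member
 * names that appear throughout this file):
 *
 *   T1 = LruRecentlyUsed     entries seen once recently, backed by data
 *   T2 = LruFrequentlyUsed   entries seen more than once, backed by data
 *   B1 = LruRecentlyGhost    metadata of entries evicted from T1, no data
 *   B2 = LruFrequentlyGhost  metadata of entries evicted from T2, no data
 *
 * A hit in B1 suggests the "recently used" side is too small, so the adaptation value
 * uAdaptVal (the target size of T1) is increased; a hit in B2 decreases it. The ghost
 * entry is then re-read from the file and moved to T2, see pdmacFileCacheUpdate() and
 * pdmacFileCacheReplace() below.
 */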
42
43/*******************************************************************************
44* Header Files *
45*******************************************************************************/
46#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
47#define RT_STRICT
48#include <iprt/types.h>
49#include <iprt/mem.h>
50#include <iprt/path.h>
51#include <VBox/log.h>
52#include <VBox/stam.h>
53
54#include "PDMAsyncCompletionFileInternal.h"
55
56#ifdef VBOX_STRICT
57# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) \
58 do \
59 { \
60 AssertMsg(RTCritSectIsOwner(&(Cache)->CritSect), \
61 ("Thread does not own critical section\n"));\
62 } while (0)
63#else
64# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while (0)
65#endif
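/*
 * Usage sketch (the helper name is made up, see pdmacFileCacheEvictPagesFrom() below
 * for a real caller): the macro is placed at the top of functions which may only be
 * called with the global cache critical section held.
 *
 *     static void pdmacFileCacheSomeLruHelper(PPDMACFILECACHEGLOBAL pCache)
 *     {
 *         PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
 *         ...
 *     }
 */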
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70static void pdmacFileCacheTaskCompleted(PPDMACTASKFILE pTask, void *pvUser);
71
72DECLINLINE(void) pdmacFileEpCacheEntryRelease(PPDMACFILECACHEENTRY pEntry)
73{
74 AssertMsg(pEntry->cRefs > 0, ("Trying to release a not referenced entry\n"));
75 ASMAtomicDecU32(&pEntry->cRefs);
76}
77
78DECLINLINE(void) pdmacFileEpCacheEntryRef(PPDMACFILECACHEENTRY pEntry)
79{
80 ASMAtomicIncU32(&pEntry->cRefs);
81}
82
83/**
84 * Checks consistency of a LRU list.
85 *
86 * @returns nothing
87 * @param pList The LRU list to check.
88 * @param pNotInList Element which is not allowed to occur in the list.
89 */
90static void pdmacFileCacheCheckList(PPDMACFILELRULIST pList, PPDMACFILECACHEENTRY pNotInList)
91{
92#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
93 PPDMACFILECACHEENTRY pCurr = pList->pHead;
94
95 /* Check that there are no double entries and no cycles in the list. */
96 while (pCurr)
97 {
98 PPDMACFILECACHEENTRY pNext = pCurr->pNext;
99
100 while (pNext)
101 {
102 AssertMsg(pCurr != pNext,
103 ("Entry %#p is at least two times in list %#p or there is a cycle in the list\n",
104 pCurr, pList));
105 pNext = pNext->pNext;
106 }
107
108 AssertMsg(pCurr != pNotInList, ("Not allowed entry %#p is in list\n", pCurr));
109
110 if (!pCurr->pNext)
111 AssertMsg(pCurr == pList->pTail, ("End of list reached but last element is not list tail\n"));
112
113 pCurr = pCurr->pNext;
114 }
115#endif
116}
117
118/**
119 * Unlinks a cache entry from the LRU list it is assigned to.
120 *
121 * @returns nothing.
122 * @param pEntry The entry to unlink.
123 */
124static void pdmacFileCacheEntryRemoveFromList(PPDMACFILECACHEENTRY pEntry)
125{
126 PPDMACFILELRULIST pList = pEntry->pList;
127 PPDMACFILECACHEENTRY pPrev, pNext;
128
129 LogFlowFunc((": Deleting entry %#p from list %#p\n", pEntry, pList));
130
131 AssertPtr(pList);
132 pdmacFileCacheCheckList(pList, NULL);
133
134 pPrev = pEntry->pPrev;
135 pNext = pEntry->pNext;
136
137 AssertMsg(pEntry != pPrev, ("Entry links to itself as previous element\n"));
138 AssertMsg(pEntry != pNext, ("Entry links to itself as next element\n"));
139
140 if (pPrev)
141 pPrev->pNext = pNext;
142 else
143 {
144 pList->pHead = pNext;
145
146 if (pNext)
147 pNext->pPrev = NULL;
148 }
149
150 if (pNext)
151 pNext->pPrev = pPrev;
152 else
153 {
154 pList->pTail = pPrev;
155
156 if (pPrev)
157 pPrev->pNext = NULL;
158 }
159
160 pEntry->pList = NULL;
161 pEntry->pPrev = NULL;
162 pEntry->pNext = NULL;
163 pList->cbCached -= pEntry->cbData;
164 pdmacFileCacheCheckList(pList, pEntry);
165}
166
167/**
168 * Adds a cache entry to the given LRU list unlinking it from the currently
169 * assigned list if needed.
170 *
171 * @returns nothing.
172 * @param pList The list to add the entry to.
173 * @param pEntry Entry to add.
174 */
175static void pdmacFileCacheEntryAddToList(PPDMACFILELRULIST pList, PPDMACFILECACHEENTRY pEntry)
176{
177 LogFlowFunc((": Adding entry %#p to list %#p\n", pEntry, pList));
178 pdmacFileCacheCheckList(pList, NULL);
179
180 /* Remove from old list if needed */
181 if (pEntry->pList)
182 pdmacFileCacheEntryRemoveFromList(pEntry);
183
184 pEntry->pNext = pList->pHead;
185 if (pList->pHead)
186 pList->pHead->pPrev = pEntry;
187 else
188 {
189 Assert(!pList->pTail);
190 pList->pTail = pEntry;
191 }
192
193 pEntry->pPrev = NULL;
194 pList->pHead = pEntry;
195 pList->cbCached += pEntry->cbData;
196 pEntry->pList = pList;
197 pdmacFileCacheCheckList(pList, NULL);
198}
199
200/**
201 * Destroys a LRU list freeing all entries.
202 *
203 * @returns nothing
204 * @param pList Pointer to the LRU list to destroy.
205 *
206 * @note The caller must own the critical section of the cache.
207 */
208static void pdmacFileCacheDestroyList(PPDMACFILELRULIST pList)
209{
210 while (pList->pHead)
211 {
212 PPDMACFILECACHEENTRY pEntry = pList->pHead;
213
214 pList->pHead = pEntry->pNext;
215
216 AssertMsg(!(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY)),
217 ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
218
219 RTMemPageFree(pEntry->pbData);
220 RTMemFree(pEntry);
221 }
222}
223
224/**
225 * Tries to remove the given number of bytes from a given list in the cache,
226 * moving the entries to one of the given ghost lists.
227 *
228 * @returns Amount of data which could be freed.
229 * @param pCache Pointer to the global cache data.
230 * @param cbData The amount of the data to free.
231 * @param pListSrc The source list to evict data from.
232 * @param pGhostListDst The ghost list removed entries should be moved to,
233 * NULL if the entries should be freed.
234 *
235 * @note This function may return fewer bytes than requested because entries
236 * may be marked as non-evictable if they are used for I/O at the moment.
237 */
238static size_t pdmacFileCacheEvictPagesFrom(PPDMACFILECACHEGLOBAL pCache, size_t cbData,
239 PPDMACFILELRULIST pListSrc, PPDMACFILELRULIST pGhostListDst)
240{
241 size_t cbEvicted = 0;
242
243 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
244
245 AssertMsg(cbData > 0, ("Evicting 0 bytes not possible\n"));
246 AssertMsg( !pGhostListDst
247 || (pGhostListDst == &pCache->LruRecentlyGhost)
248 || (pGhostListDst == &pCache->LruFrequentlyGhost),
249 ("Destination list must be NULL or one of the ghost lists\n"));
250
251 /* Start deleting from the tail. */
252 PPDMACFILECACHEENTRY pEntry = pListSrc->pTail;
253
254 while ((cbEvicted < cbData) && pEntry)
255 {
256 PPDMACFILECACHEENTRY pCurr = pEntry;
257
258 pEntry = pEntry->pPrev;
259
260 /* We can't evict pages which are currently in progress */
261 if (!(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
262 && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
263 {
264 /* Ok eviction candidate. Grab the endpoint semaphore and check again
265 * because somebody else might have raced us. */
266 PPDMACFILEENDPOINTCACHE pEndpointCache = &pCurr->pEndpoint->DataCache;
267 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
268
269 if (!(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
270 && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
271 {
272 AssertMsg(!(pCurr->fFlags & PDMACFILECACHE_ENTRY_IS_DEPRECATED),
273 ("This entry is deprecated so it should have the I/O in progress flag set\n"));
274 Assert(!pCurr->pbDataReplace);
275
276 LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
277
278 if (pCurr->pbData)
279 {
280 RTMemPageFree(pCurr->pbData);
281 pCurr->pbData = NULL;
282 }
283
284 cbEvicted += pCurr->cbData;
285
286 if (pGhostListDst)
287 {
288 pdmacFileCacheEntryAddToList(pGhostListDst, pCurr);
289 }
290 else
291 {
292 /* Delete the entry from the AVL tree it is assigned to. */
293 STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
294 RTAvlrFileOffsetRemove(pCurr->pEndpoint->DataCache.pTree, pCurr->Core.Key);
295 STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
296
297 pdmacFileCacheEntryRemoveFromList(pCurr);
298 pCache->cbCached -= pCurr->cbData;
299
300 RTMemFree(pCurr);
301 }
302 }
303 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
304 }
305 else
306 LogFlow(("Entry %#p (%u bytes) is still in progress and can't be evicted\n", pCurr, pCurr->cbData));
307 }
308
309 return cbEvicted;
310}
311
312static size_t pdmacFileCacheReplace(PPDMACFILECACHEGLOBAL pCache, size_t cbData, PPDMACFILELRULIST pEntryList)
313{
314 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
315
316 if ( (pCache->LruRecentlyUsed.cbCached)
317 && ( (pCache->LruRecentlyUsed.cbCached > pCache->uAdaptVal)
318 || ( (pEntryList == &pCache->LruFrequentlyGhost)
319 && (pCache->LruRecentlyUsed.cbCached == pCache->uAdaptVal))))
320 {
321 /* We need to remove entry size pages from T1 and move the entries to B1 */
322 return pdmacFileCacheEvictPagesFrom(pCache, cbData,
323 &pCache->LruRecentlyUsed,
324 &pCache->LruRecentlyGhost);
325 }
326 else
327 {
328 /* We need to remove entry size pages from T2 and move the entries to B2 */
329 return pdmacFileCacheEvictPagesFrom(pCache, cbData,
330 &pCache->LruFrequentlyUsed,
331 &pCache->LruFrequentlyGhost);
332 }
333}
334
335/**
336 * Tries to evict the given amount of the data from the cache.
337 *
338 * @returns Bytes removed.
339 * @param pCache The global cache data.
340 * @param cbData Number of bytes to evict.
341 */
342static size_t pdmacFileCacheEvict(PPDMACFILECACHEGLOBAL pCache, size_t cbData)
343{
344 size_t cbRemoved = ~0;
345
346 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
347
348 if ((pCache->LruRecentlyUsed.cbCached + pCache->LruRecentlyGhost.cbCached) >= pCache->cbMax)
349 {
350 /* Delete desired pages from the cache. */
351 if (pCache->LruRecentlyUsed.cbCached < pCache->cbMax)
352 {
353 cbRemoved = pdmacFileCacheEvictPagesFrom(pCache, cbData,
354 &pCache->LruRecentlyGhost,
355 NULL);
356 }
357 else
358 {
359 cbRemoved = pdmacFileCacheEvictPagesFrom(pCache, cbData,
360 &pCache->LruRecentlyUsed,
361 NULL);
362 }
363 }
364 else
365 {
366 uint32_t cbUsed = pCache->LruRecentlyUsed.cbCached + pCache->LruRecentlyGhost.cbCached +
367 pCache->LruFrequentlyUsed.cbCached + pCache->LruFrequentlyGhost.cbCached;
368
369 if (cbUsed >= pCache->cbMax)
370 {
371 if (cbUsed == 2*pCache->cbMax)
372 cbRemoved = pdmacFileCacheEvictPagesFrom(pCache, cbData,
373 &pCache->LruFrequentlyGhost,
374 NULL);
375
376 if (cbRemoved >= cbData)
377 cbRemoved = pdmacFileCacheReplace(pCache, cbData, NULL);
378 }
379 }
380
381 return cbRemoved;
382}
383
384/**
385 * Updates the cache parameters
386 *
387 * @returns nothing.
388 * @param pCache The global cache data.
389 * @param pEntry The entry used for the update.
390 */
391static void pdmacFileCacheUpdate(PPDMACFILECACHEGLOBAL pCache, PPDMACFILECACHEENTRY pEntry)
392{
393 int32_t uUpdateVal = 0;
394
395 PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
396
397 /* Update parameters */
398 if (pEntry->pList == &pCache->LruRecentlyGhost)
399 {
400 if (pCache->LruRecentlyGhost.cbCached >= pCache->LruFrequentlyGhost.cbCached)
401 uUpdateVal = 1;
402 else
403 uUpdateVal = pCache->LruFrequentlyGhost.cbCached / pCache->LruRecentlyGhost.cbCached;
404
405 pCache->uAdaptVal = RT_MIN(pCache->uAdaptVal + uUpdateVal, pCache->cbMax);
406 }
407 else if (pEntry->pList == &pCache->LruFrequentlyGhost)
408 {
409 if (pCache->LruFrequentlyGhost.cbCached >= pCache->LruRecentlyGhost.cbCached)
410 uUpdateVal = 1;
411 else
412 uUpdateVal = pCache->LruRecentlyGhost.cbCached / pCache->LruFrequentlyGhost.cbCached;
413
414 pCache->uAdaptVal = RT_MIN(pCache->uAdaptVal - uUpdateVal, 0);
415 }
416 else
417 AssertMsgFailed(("Invalid list type\n"));
418}
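/*
 * Worked example (illustrative): with 3 MB in LruFrequentlyGhost and 1 MB in
 * LruRecentlyGhost, a hit in the recently-ghost list gives uUpdateVal = 3 and the
 * adaptation target uAdaptVal grows by 3 (capped at cbMax). Note that the decrease
 * path above uses RT_MIN(uAdaptVal - uUpdateVal, 0), which clamps the new value to
 * at most zero, whereas the classic ARC adaptation floors it at zero instead.
 */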
419
420/**
421 * Initiates a read I/O task for the given entry.
422 *
423 * @returns nothing.
424 * @param pEntry The entry to read the data into.
425 */
426static void pdmacFileCacheReadFromEndpoint(PPDMACFILECACHEENTRY pEntry)
427{
428 LogFlowFunc((": Reading data into cache entry %#p\n", pEntry));
429
430 /* Make sure no one evicts the entry while it is accessed. */
431 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
432
433 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEntry->pEndpoint);
434 AssertPtr(pIoTask);
435
436 AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
437
438 pIoTask->pEndpoint = pEntry->pEndpoint;
439 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_READ;
440 pIoTask->Off = pEntry->Core.Key;
441 pIoTask->DataSeg.cbSeg = pEntry->cbData;
442 pIoTask->DataSeg.pvSeg = pEntry->pbData;
443 pIoTask->pvUser = pEntry;
444 pIoTask->pfnCompleted = pdmacFileCacheTaskCompleted;
445
446 /* Send it off to the I/O manager. */
447 pdmacFileEpAddTask(pEntry->pEndpoint, pIoTask);
448}
449
450/**
451 * Initiates a write I/O task for the given entry.
452 *
453 * @returns nothing.
454 * @param pEntry The entry to read the data from.
455 */
456static void pdmacFileCacheWriteToEndpoint(PPDMACFILECACHEENTRY pEntry)
457{
458 LogFlowFunc((": Writing data from cache entry %#p\n", pEntry));
459
460 /* Make sure no one evicts the entry while it is accessed. */
461 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
462
463 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEntry->pEndpoint);
464 AssertPtr(pIoTask);
465
466 AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
467
468 pIoTask->pEndpoint = pEntry->pEndpoint;
469 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_WRITE;
470 pIoTask->Off = pEntry->Core.Key;
471 pIoTask->DataSeg.cbSeg = pEntry->cbData;
472 pIoTask->DataSeg.pvSeg = pEntry->pbData;
473 pIoTask->pvUser = pEntry;
474 pIoTask->pfnCompleted = pdmacFileCacheTaskCompleted;
475
476 /* Send it off to the I/O manager. */
477 pdmacFileEpAddTask(pEntry->pEndpoint, pIoTask);
478}
479
480/**
481 * Completion callback for I/O tasks.
482 *
483 * @returns nothing.
484 * @param pTask The completed task.
485 * @param pvUser Opaque user data.
486 */
487static void pdmacFileCacheTaskCompleted(PPDMACTASKFILE pTask, void *pvUser)
488{
489 PPDMACFILECACHEENTRY pEntry = (PPDMACFILECACHEENTRY)pvUser;
490 PPDMACFILECACHEGLOBAL pCache = pEntry->pCache;
491 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pEntry->pEndpoint;
492
493 /* Reference the entry now as we are clearing the I/O in progress flag
494 * which protected the entry until now. */
495 pdmacFileEpCacheEntryRef(pEntry);
496
497 RTSemRWRequestWrite(pEndpoint->DataCache.SemRWEntries, RT_INDEFINITE_WAIT);
498 pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
499
500 /* Process the waiting segment list. The data in the entry might have changed in between. */
501 PPDMACFILETASKSEG pCurr = pEntry->pWaitingHead;
502
503 AssertMsg((pCurr && pEntry->pWaitingTail) || (!pCurr && !pEntry->pWaitingTail),
504 ("The list tail was not updated correctly\n"));
505 pEntry->pWaitingTail = NULL;
506 pEntry->pWaitingHead = NULL;
507
508 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
509 {
510 if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DEPRECATED)
511 {
512 AssertMsg(!pCurr, ("The entry is deprecated but has waiting write segments attached\n"));
513
514 RTMemPageFree(pEntry->pbData);
515 pEntry->pbData = pEntry->pbDataReplace;
516 pEntry->pbDataReplace = NULL;
517 pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IS_DEPRECATED;
518 }
519 else
520 {
521 pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IS_DIRTY;
522
523 while (pCurr)
524 {
525 AssertMsg(pCurr->fWrite, ("Completed write entries should never have read tasks attached\n"));
526
527 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
528 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
529
530 uint32_t uOld = ASMAtomicSubU32(&pCurr->pTask->cbTransferLeft, pCurr->cbTransfer);
531 AssertMsg(uOld >= pCurr->cbTransfer, ("New value would overflow\n"));
532 if (!(uOld - pCurr->cbTransfer)
533 && !ASMAtomicXchgBool(&pCurr->pTask->fCompleted, true))
534 pdmR3AsyncCompletionCompleteTask(&pCurr->pTask->Core);
535
536 PPDMACFILETASKSEG pFree = pCurr;
537 pCurr = pCurr->pNext;
538
539 RTMemFree(pFree);
540 }
541 }
542 }
543 else
544 {
545 AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_READ, ("Invalid transfer type\n"));
546 AssertMsg(!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY),("Invalid flags set\n"));
547
548 while (pCurr)
549 {
550 if (pCurr->fWrite)
551 {
552 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
553 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
554 }
555 else
556 memcpy(pCurr->pvBuf, pEntry->pbData + pCurr->uBufOffset, pCurr->cbTransfer);
557
558 uint32_t uOld = ASMAtomicSubU32(&pCurr->pTask->cbTransferLeft, pCurr->cbTransfer);
559 AssertMsg(uOld >= pCurr->cbTransfer, ("New value would overflow\n"));
560 if (!(uOld - pCurr->cbTransfer)
561 && !ASMAtomicXchgBool(&pCurr->pTask->fCompleted, true))
562 pdmR3AsyncCompletionCompleteTask(&pCurr->pTask->Core);
563
564 PPDMACFILETASKSEG pFree = pCurr;
565 pCurr = pCurr->pNext;
566
567 RTMemFree(pFree);
568 }
569 }
570
571 if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
572 pdmacFileCacheWriteToEndpoint(pEntry);
573
574 RTSemRWReleaseWrite(pEndpoint->DataCache.SemRWEntries);
575
576 /* Dereference so that it isn't protected anymore unless we issued another write for it. */
577 pdmacFileEpCacheEntryRelease(pEntry);
578}
579
580/**
581 * Initializes the I/O cache.
582 *
583 * @returns VBox status code.
584 * @param pClassFile The global class data for file endpoints.
585 * @param pCfgNode CFGM node to query configuration data from.
586 */
587int pdmacFileCacheInit(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile, PCFGMNODE pCfgNode)
588{
589 int rc = VINF_SUCCESS;
590 PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
591
592 /* Initialize members */
593 pCache->LruRecentlyUsed.pHead = NULL;
594 pCache->LruRecentlyUsed.pTail = NULL;
595 pCache->LruRecentlyUsed.cbCached = 0;
596
597 pCache->LruFrequentlyUsed.pHead = NULL;
598 pCache->LruFrequentlyUsed.pTail = NULL;
599 pCache->LruFrequentlyUsed.cbCached = 0;
600
601 pCache->LruRecentlyGhost.pHead = NULL;
602 pCache->LruRecentlyGhost.pTail = NULL;
603 pCache->LruRecentlyGhost.cbCached = 0;
604
605 pCache->LruFrequentlyGhost.pHead = NULL;
606 pCache->LruFrequentlyGhost.pTail = NULL;
607 pCache->LruFrequentlyGhost.cbCached = 0;
608
609 rc = CFGMR3QueryU32Def(pCfgNode, "CacheSize", &pCache->cbMax, 5 * _1M);
610 AssertLogRelRCReturn(rc, rc);
611
612 pCache->cbCached = 0;
613 pCache->uAdaptVal = 0;
614 LogFlowFunc((": Maximum number of bytes cached %u\n", pCache->cbMax));
615
616 STAMR3Register(pClassFile->Core.pVM, &pCache->cbMax,
617 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
618 "/PDM/AsyncCompletion/File/cbMax",
619 STAMUNIT_BYTES,
620 "Maximum cache size");
621 STAMR3Register(pClassFile->Core.pVM, &pCache->cbCached,
622 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
623 "/PDM/AsyncCompletion/File/cbCached",
624 STAMUNIT_BYTES,
625 "Currently used cache");
626 STAMR3Register(pClassFile->Core.pVM, &pCache->LruRecentlyUsed.cbCached,
627 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
628 "/PDM/AsyncCompletion/File/cbCachedMru",
629 STAMUNIT_BYTES,
630 "Number of bytes cached in Mru list");
631 STAMR3Register(pClassFile->Core.pVM, &pCache->LruFrequentlyUsed.cbCached,
632 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
633 "/PDM/AsyncCompletion/File/cbCachedFru",
634 STAMUNIT_BYTES,
635 "Number of bytes cached in Fru list");
636 STAMR3Register(pClassFile->Core.pVM, &pCache->LruRecentlyGhost.cbCached,
637 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
638 "/PDM/AsyncCompletion/File/cbCachedMruGhost",
639 STAMUNIT_BYTES,
640 "Number of bytes cached in Mru ghost list");
641 STAMR3Register(pClassFile->Core.pVM, &pCache->LruFrequentlyGhost.cbCached,
642 STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
643 "/PDM/AsyncCompletion/File/cbCachedFruGhost",
644 STAMUNIT_BYTES, "Number of bytes cached in Fru ghost list");
645
646#ifdef VBOX_WITH_STATISTICS
647 STAMR3Register(pClassFile->Core.pVM, &pCache->cHits,
648 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
649 "/PDM/AsyncCompletion/File/CacheHits",
650 STAMUNIT_COUNT, "Number of hits in the cache");
651 STAMR3Register(pClassFile->Core.pVM, &pCache->cPartialHits,
652 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
653 "/PDM/AsyncCompletion/File/CachePartialHits",
654 STAMUNIT_COUNT, "Number of partial hits in the cache");
655 STAMR3Register(pClassFile->Core.pVM, &pCache->cMisses,
656 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
657 "/PDM/AsyncCompletion/File/CacheMisses",
658 STAMUNIT_COUNT, "Number of misses when accessing the cache");
659 STAMR3Register(pClassFile->Core.pVM, &pCache->StatRead,
660 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
661 "/PDM/AsyncCompletion/File/CacheRead",
662 STAMUNIT_BYTES, "Number of bytes read from the cache");
663 STAMR3Register(pClassFile->Core.pVM, &pCache->StatWritten,
664 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
665 "/PDM/AsyncCompletion/File/CacheWritten",
666 STAMUNIT_BYTES, "Number of bytes written to the cache");
667 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeGet,
668 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
669 "/PDM/AsyncCompletion/File/CacheTreeGet",
670 STAMUNIT_TICKS_PER_CALL, "Time taken to access an entry in the tree");
671 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeInsert,
672 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
673 "/PDM/AsyncCompletion/File/CacheTreeInsert",
674 STAMUNIT_TICKS_PER_CALL, "Time taken to insert an entry in the tree");
675 STAMR3Register(pClassFile->Core.pVM, &pCache->StatTreeRemove,
676 STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
677 "/PDM/AsyncCompletion/File/CacheTreeRemove",
678 STAMUNIT_TICKS_PER_CALL, "Time taken to remove an entry from the tree");
679#endif
680
681 /* Initialize the critical section */
682 rc = RTCritSectInit(&pCache->CritSect);
683
684 if (RT_SUCCESS(rc))
685 LogRel(("AIOMgr: Cache successfully initialised. Cache size is %u bytes\n", pCache->cbMax));
686
687 return rc;
688}
689
690/**
691 * Destroys the cache, freeing all data.
692 *
693 * @returns nothing.
694 * @param pClassFile The global class data for file endpoints.
695 */
696void pdmacFileCacheDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile)
697{
698 PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
699
700 /* Make sure no one else uses the cache now */
701 RTCritSectEnter(&pCache->CritSect);
702
703 /* Cleanup deleting all cache entries waiting for in progress entries to finish. */
704 pdmacFileCacheDestroyList(&pCache->LruRecentlyUsed);
705 pdmacFileCacheDestroyList(&pCache->LruFrequentlyUsed);
706 pdmacFileCacheDestroyList(&pCache->LruRecentlyGhost);
707 pdmacFileCacheDestroyList(&pCache->LruFrequentlyGhost);
708
709 RTCritSectLeave(&pCache->CritSect);
710
711 RTCritSectDelete(&pCache->CritSect);
712}
713
714/**
715 * Initializes per endpoint cache data
716 * like the AVL tree used to access cached entries.
717 *
718 * @returns VBox status code.
719 * @param pEndpoint The endpoint to init the cache for.
720 * @param pClassFile The global class data for file endpoints.
721 */
722int pdmacFileEpCacheInit(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile)
723{
724 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
725
726 pEndpointCache->pCache = &pClassFile->Cache;
727
728 int rc = RTSemRWCreate(&pEndpointCache->SemRWEntries);
729 if (RT_SUCCESS(rc))
730 {
731 pEndpointCache->pTree = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
732 if (!pEndpointCache->pTree)
733 {
734 rc = VERR_NO_MEMORY;
735 RTSemRWDestroy(pEndpointCache->SemRWEntries);
736 }
737 }
738
739#ifdef VBOX_WITH_STATISTICS
740 if (RT_SUCCESS(rc))
741 {
742 STAMR3RegisterF(pClassFile->Core.pVM, &pEndpointCache->StatWriteDeferred,
743 STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
744 STAMUNIT_COUNT, "Number of deferred writes",
745 "/PDM/AsyncCompletion/File/%s/Cache/DeferredWrites", RTPathFilename(pEndpoint->Core.pszUri));
746 }
747#endif
748
749 return rc;
750}
751
752/**
753 * Callback for the AVL destroy routine. Frees a cache entry for this endpoint.
754 *
755 * @returns IPRT status code.
756 * @param pNode The node to destroy.
757 * @param pvUser Opaque user data.
758 */
759static int pdmacFileEpCacheEntryDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
760{
761 PPDMACFILECACHEENTRY pEntry = (PPDMACFILECACHEENTRY)pNode;
762 PPDMACFILECACHEGLOBAL pCache = (PPDMACFILECACHEGLOBAL)pvUser;
763 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEntry->pEndpoint->DataCache;
764
765 while (pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY))
766 {
767 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
768 RTThreadSleep(250);
769 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
770 }
771
772 AssertMsg(!(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY)),
773 ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
774
775 pdmacFileCacheEntryRemoveFromList(pEntry);
776 pCache->cbCached -= pEntry->cbData;
777
778 RTMemPageFree(pEntry->pbData);
779 RTMemFree(pEntry);
780
781 return VINF_SUCCESS;
782}
783
784/**
785 * Destroys all cache resources used by the given endpoint.
786 *
787 * @returns nothing.
788 * @param pEndpoint The endpoint to destroy.
789 */
790void pdmacFileEpCacheDestroy(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
791{
792 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
793 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
794
795 /* Make sure nobody is accessing the cache while we delete the tree. */
796 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
797 RTCritSectEnter(&pCache->CritSect);
798 RTAvlrFileOffsetDestroy(pEndpointCache->pTree, pdmacFileEpCacheEntryDestroy, pCache);
799 RTCritSectLeave(&pCache->CritSect);
800 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
801
802 RTSemRWDestroy(pEndpointCache->SemRWEntries);
803
804#ifdef VBOX_WITH_STATISTICS
805 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
806
807 STAMR3Deregister(pEpClassFile->Core.pVM, &pEndpointCache->StatWriteDeferred);
808#endif
809}
810
811static PPDMACFILECACHEENTRY pdmacFileEpCacheGetCacheEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off)
812{
813 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
814 PPDMACFILECACHEENTRY pEntry = NULL;
815
816 STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
817
818 RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
819 pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetRangeGet(pEndpointCache->pTree, off);
820 if (pEntry)
821 pdmacFileEpCacheEntryRef(pEntry);
822 RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
823
824 STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
825
826 return pEntry;
827}
828
829static PPDMACFILECACHEENTRY pdmacFileEpCacheGetCacheBestFitEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off)
830{
831 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
832 PPDMACFILECACHEENTRY pEntry = NULL;
833
834 STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
835
836 RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
837 pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, true);
838 if (pEntry)
839 pdmacFileEpCacheEntryRef(pEntry);
840 RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
841
842 STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
843
844 return pEntry;
845}
846
847static void pdmacFileEpCacheInsertEntry(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
848{
849 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
850
851 STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
852 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
853 bool fInserted = RTAvlrFileOffsetInsert(pEndpointCache->pTree, &pEntry->Core);
854 AssertMsg(fInserted, ("Node was not inserted into tree\n"));
855 STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
856 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
857}
858
859/**
860 * Allocates and initializes a new entry for the cache.
861 * The entry has a reference count of 1.
862 *
863 * @returns Pointer to the new cache entry or NULL if out of memory.
864 * @param pCache The cache the entry belongs to.
865 * @param pEndpoint The endpoint the entry holds data for.
866 * @param off Start offset.
867 * @param cbData Size of the cache entry.
868 */
869static PPDMACFILECACHEENTRY pdmacFileCacheEntryAlloc(PPDMACFILECACHEGLOBAL pCache,
870 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
871 RTFOFF off, size_t cbData)
872{
873 PPDMACFILECACHEENTRY pEntryNew = (PPDMACFILECACHEENTRY)RTMemAllocZ(sizeof(PDMACFILECACHEENTRY));
874
875 if (RT_UNLIKELY(!pEntryNew))
876 return NULL;
877
878 pEntryNew->Core.Key = off;
879 pEntryNew->Core.KeyLast = off + cbData - 1;
880 pEntryNew->pEndpoint = pEndpoint;
881 pEntryNew->pCache = pCache;
882 pEntryNew->fFlags = 0;
883 pEntryNew->cRefs = 1; /* We are using it now. */
884 pEntryNew->pList = NULL;
885 pEntryNew->cbData = cbData;
886 pEntryNew->pWaitingHead = NULL;
887 pEntryNew->pWaitingTail = NULL;
888 pEntryNew->pbDataReplace = NULL;
889 pEntryNew->pbData = (uint8_t *)RTMemPageAlloc(cbData);
890
891 if (RT_UNLIKELY(!pEntryNew->pbData))
892 {
893 RTMemFree(pEntryNew);
894 return NULL;
895 }
896
897 return pEntryNew;
898}
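/*
 * Typical usage (a sketch of the pattern in the read/write paths below; cbAligned
 * stands for the aligned request size, cbToReadAligned/cbToWrite in the real callers):
 * a freshly allocated entry still has to be linked into an LRU list and into the
 * endpoint's AVL tree before the I/O is started.
 *
 *     PPDMACFILECACHEENTRY pEntryNew = pdmacFileCacheEntryAlloc(pCache, pEndpoint, off, cbAligned);
 *     if (pEntryNew)
 *     {
 *         RTCritSectEnter(&pCache->CritSect);
 *         pdmacFileCacheEntryAddToList(&pCache->LruRecentlyUsed, pEntryNew);
 *         pCache->cbCached += cbAligned;
 *         RTCritSectLeave(&pCache->CritSect);
 *         pdmacFileEpCacheInsertEntry(pEndpointCache, pEntryNew);
 *     }
 */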
899
900/**
901 * Adds a segment to the waiting list for a cache entry
902 * which is currently in progress.
903 *
904 * @returns nothing.
905 * @param pEntry The cache entry to add the segment to.
906 * @param pSeg The segment to add.
907 */
908static void pdmacFileEpCacheEntryAddWaitingSegment(PPDMACFILECACHEENTRY pEntry, PPDMACFILETASKSEG pSeg)
909{
910 pSeg->pNext = NULL;
911
912 if (pEntry->pWaitingHead)
913 {
914 AssertPtr(pEntry->pWaitingTail);
915
916 pEntry->pWaitingTail->pNext = pSeg;
917 pEntry->pWaitingTail = pSeg;
918 }
919 else
920 {
921 Assert(!pEntry->pWaitingTail);
922
923 pEntry->pWaitingHead = pSeg;
924 pEntry->pWaitingTail = pSeg;
925 }
926}
927
928/**
929 * Checks that a set of flags is set/clear acquiring the R/W semaphore
930 * in exclusive mode.
931 *
932 * @returns true if the flag in fSet is set and the one in fClear is clear.
933 * false otherwise.
934 * The R/W semaphore is only held if true is returned.
935 *
936 * @param pEndpointCache The endpoint cache instance data.
937 * @param pEntry The entry to check the flags for.
938 * @param fSet The flag which is tested to be set.
939 * @param fClear The flag which is tested to be clear.
940 */
941DECLINLINE(bool) pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(PPDMACFILEENDPOINTCACHE pEndpointCache,
942 PPDMACFILECACHEENTRY pEntry,
943 uint32_t fSet, uint32_t fClear)
944{
945 bool fPassed = ((pEntry->fFlags & fSet) && !(pEntry->fFlags & fClear));
946
947 if (fPassed)
948 {
949 /* Acquire the lock and check again because the completion callback might have raced us. */
950 RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
951
952 fPassed = ((pEntry->fFlags & fSet) && !(pEntry->fFlags & fClear));
953
954 /* Drop the lock if we didn't pass the test. */
955 if (!fPassed)
956 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
957 }
958
959 return fPassed;
960}
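/*
 * Usage sketch (mirrors the read path below): the caller tests a flag combination and,
 * only if the test still holds after the write lock was taken, keeps the lock and must
 * release it itself.
 *
 *     if (pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
 *                                                        PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
 *                                                        PDMACFILECACHE_ENTRY_IS_DIRTY))
 *     {
 *         ... queue waiting segments on the entry ...
 *         RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
 *     }
 */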
961
962/**
963 * Advances the current segment buffer by the number of bytes transferred
964 * or gets the next segment.
965 */
966#define ADVANCE_SEGMENT_BUFFER(BytesTransfered) \
967 do \
968 { \
969 cbSegLeft -= BytesTransfered; \
970 if (!cbSegLeft) \
971 { \
972 iSegCurr++; \
973 cbSegLeft = paSegments[iSegCurr].cbSeg; \
974 pbSegBuf = (uint8_t *)paSegments[iSegCurr].pvSeg; \
975 } \
976 else \
977 pbSegBuf += BytesTransfered; \
978 } \
979 while (0)
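/*
 * Usage sketch (this is the pattern used throughout the read and write paths below):
 * copy as much as fits into the current segment and let the macro switch to the next
 * one. The names iSegCurr, cbSegLeft, pbSegBuf and paSegments must be in scope.
 *
 *     size_t cbCopy = RT_MIN(cbSegLeft, cbToRead);
 *     memcpy(pbSegBuf, pEntry->pbData + OffDiff, cbCopy);
 *     ADVANCE_SEGMENT_BUFFER(cbCopy);
 *     cbToRead -= cbCopy;
 *     off      += cbCopy;
 *     OffDiff  += cbCopy;
 */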
980
981/**
982 * Reads the specified data from the endpoint using the cache if possible.
983 *
984 * @returns VBox status code.
985 * @param pEndpoint The endpoint to read from.
986 * @param pTask The task structure used as identifier for this request.
987 * @param off The offset to start reading from.
988 * @param paSegments Pointer to the array holding the destination buffers.
989 * @param cSegments Number of segments in the array.
990 * @param cbRead Number of bytes to read.
991 */
992int pdmacFileEpCacheRead(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
993 RTFOFF off, PCPDMDATASEG paSegments, size_t cSegments,
994 size_t cbRead)
995{
996 int rc = VINF_SUCCESS;
997 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
998 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
999 PPDMACFILECACHEENTRY pEntry;
1000
1001 LogFlowFunc((": pEndpoint=%#p{%s} pTask=%#p off=%RTfoff paSegments=%#p cSegments=%u cbRead=%u\n",
1002 pEndpoint, pEndpoint->Core.pszUri, pTask, off, paSegments, cSegments, cbRead));
1003
1004 pTask->cbTransferLeft = cbRead;
1005 /* Set to completed to make sure that the task is valid while we access it. */
1006 ASMAtomicWriteBool(&pTask->fCompleted, true);
1007
1008 int iSegCurr = 0;
1009 uint8_t *pbSegBuf = (uint8_t *)paSegments[iSegCurr].pvSeg;
1010 size_t cbSegLeft = paSegments[iSegCurr].cbSeg;
1011
1012 while (cbRead)
1013 {
1014 size_t cbToRead;
1015
1016 pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);
1017
1018 /*
1019 * If there is no entry we try to create a new one, evicting unused pages
1020 * if the cache is full. If this is not possible we will pass the request through
1021 * and skip the caching (all entries may still be in progress so they can't
1022 * be evicted).
1023 * If we have an entry it can be in one of the LRU lists where the entry
1024 * contains data (recently used or frequently used LRU) so we can just read
1025 * the data we need and put the entry at the head of the frequently used LRU list.
1026 * In case the entry is in one of the ghost lists it doesn't contain any data.
1027 * We have to fetch it again evicting pages from either T1 or T2 to make room.
1028 */
1029 if (pEntry)
1030 {
1031 RTFOFF OffDiff = off - pEntry->Core.Key;
1032
1033 AssertMsg(off >= pEntry->Core.Key,
1034 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1035 off, pEntry->Core.Key));
1036
1037 AssertPtr(pEntry->pList);
1038
1039 cbToRead = RT_MIN(pEntry->cbData - OffDiff, cbRead);
1040 cbRead -= cbToRead;
1041
1042 if (!cbRead)
1043 STAM_COUNTER_INC(&pCache->cHits);
1044 else
1045 STAM_COUNTER_INC(&pCache->cPartialHits);
1046
1047 STAM_COUNTER_ADD(&pCache->StatRead, cbToRead);
1048
1049 /* Ghost lists contain no data. */
1050 if ( (pEntry->pList == &pCache->LruRecentlyUsed)
1051 || (pEntry->pList == &pCache->LruFrequentlyUsed))
1052 {
1053 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1054 PDMACFILECACHE_ENTRY_IS_DEPRECATED,
1055 0))
1056 {
1057 /* Entry is deprecated. Read data from the new buffer. */
1058 while (cbToRead)
1059 {
1060 size_t cbCopy = RT_MIN(cbSegLeft, cbToRead);
1061
1062 memcpy(pbSegBuf, pEntry->pbDataReplace + OffDiff, cbCopy);
1063
1064 ADVANCE_SEGMENT_BUFFER(cbCopy);
1065
1066 cbToRead -= cbCopy;
1067 off += cbCopy;
1068 OffDiff += cbCopy;
1069 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1070 }
1071 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1072 }
1073 else
1074 {
1075 if (pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1076 PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1077 PDMACFILECACHE_ENTRY_IS_DIRTY))
1078 {
1079 /* Entry hasn't completed yet. Append to the waiting list. */
1080 while (cbToRead)
1081 {
1082 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1083
1084 pSeg->pTask = pTask;
1085 pSeg->uBufOffset = OffDiff;
1086 pSeg->cbTransfer = RT_MIN(cbToRead, cbSegLeft);
1087 pSeg->pvBuf = pbSegBuf;
1088 pSeg->fWrite = false;
1089
1090 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1091
1092 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1093
1094 off += pSeg->cbTransfer;
1095 cbToRead -= pSeg->cbTransfer;
1096 OffDiff += pSeg->cbTransfer;
1097 }
1098 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1099 }
1100 else
1101 {
1102 /* Read as much as we can from the entry. */
1103 while (cbToRead)
1104 {
1105 size_t cbCopy = RT_MIN(cbSegLeft, cbToRead);
1106
1107 memcpy(pbSegBuf, pEntry->pbData + OffDiff, cbCopy);
1108
1109 ADVANCE_SEGMENT_BUFFER(cbCopy);
1110
1111 cbToRead -= cbCopy;
1112 off += cbCopy;
1113 OffDiff += cbCopy;
1114 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1115 }
1116 }
1117 }
1118
1119 /* Move this entry to the top position */
1120 RTCritSectEnter(&pCache->CritSect);
1121 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1122 RTCritSectLeave(&pCache->CritSect);
1123 }
1124 else
1125 {
1126 LogFlow(("Fetching data for ghost entry %#p from file\n", pEntry));
1127
1128 RTCritSectEnter(&pCache->CritSect);
1129 pdmacFileCacheUpdate(pCache, pEntry);
1130 pdmacFileCacheReplace(pCache, pEntry->cbData, pEntry->pList);
1131
1132 /* Move the entry to T2 and fetch it to the cache. */
1133 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1134 RTCritSectLeave(&pCache->CritSect);
1135
1136 pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
1137 AssertPtr(pEntry->pbData);
1138
1139 while (cbToRead)
1140 {
1141 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1142
1143 AssertMsg(off >= pEntry->Core.Key,
1144 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1145 off, pEntry->Core.Key));
1146
1147 pSeg->pTask = pTask;
1148 pSeg->uBufOffset = OffDiff;
1149 pSeg->cbTransfer = RT_MIN(cbToRead, cbSegLeft);
1150 pSeg->pvBuf = pbSegBuf;
1151
1152 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1153
1154 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1155
1156 off += pSeg->cbTransfer;
1157 OffDiff += pSeg->cbTransfer;
1158 cbToRead -= pSeg->cbTransfer;
1159 }
1160
1161 pdmacFileCacheReadFromEndpoint(pEntry);
1162 }
1163 pdmacFileEpCacheEntryRelease(pEntry);
1164 }
1165 else
1166 {
1167 /* No entry found for this offset. Get best fit entry and fetch the data to the cache. */
1168 size_t cbToReadAligned;
1169 PPDMACFILECACHEENTRY pEntryBestFit = pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off);
1170
1171 LogFlow(("%sbest fit entry for off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
1172 pEntryBestFit ? "" : "No ",
1173 off,
1174 pEntryBestFit ? pEntryBestFit->Core.Key : 0,
1175 pEntryBestFit ? pEntryBestFit->Core.KeyLast : 0,
1176 pEntryBestFit ? pEntryBestFit->cbData : 0));
1177
1178 if (pEntryBestFit && ((off + (RTFOFF)cbRead) > pEntryBestFit->Core.Key))
1179 {
1180 cbToRead = pEntryBestFit->Core.Key - off;
1181 pdmacFileEpCacheEntryRelease(pEntryBestFit);
1182 cbToReadAligned = cbToRead;
1183 }
1184 else
1185 {
1186 /*
1187 * Align the size to a 4KB boundary.
1188 * Memory size is aligned to a page boundary
1189 * and memory is wasted if the size is rather small.
1190 * (For example reads with a size of 512 bytes.)
1191 */
1192 cbToRead = cbRead;
1193 cbToReadAligned = RT_ALIGN_Z(cbRead, PAGE_SIZE);
1194
1195 /* Clip read to file size */
1196 cbToReadAligned = RT_MIN(pEndpoint->cbFile - off, cbToReadAligned);
1197 if (pEntryBestFit)
1198 cbToReadAligned = RT_MIN(cbToReadAligned, pEntryBestFit->Core.Key - off);
1199 }
1200
1201 cbRead -= cbToRead;
1202
1203 if (!cbRead)
1204 STAM_COUNTER_INC(&pCache->cMisses);
1205 else
1206 STAM_COUNTER_INC(&pCache->cPartialHits);
1207
1208 RTCritSectEnter(&pCache->CritSect);
1209 size_t cbRemoved = pdmacFileCacheEvict(pCache, cbToReadAligned);
1210 RTCritSectLeave(&pCache->CritSect);
1211
1212 if (cbRemoved >= cbToReadAligned)
1213 {
1214 LogFlow(("Evicted %u bytes (%u requested). Creating new cache entry\n", cbRemoved, cbToReadAligned));
1215 PPDMACFILECACHEENTRY pEntryNew = pdmacFileCacheEntryAlloc(pCache, pEndpoint, off, cbToReadAligned);
1216 AssertPtr(pEntryNew);
1217
1218 RTCritSectEnter(&pCache->CritSect);
1219 pdmacFileCacheEntryAddToList(&pCache->LruRecentlyUsed, pEntryNew);
1220 pCache->cbCached += cbToReadAligned;
1221 RTCritSectLeave(&pCache->CritSect);
1222
1223 pdmacFileEpCacheInsertEntry(pEndpointCache, pEntryNew);
1224 uint32_t uBufOffset = 0;
1225
1226 while (cbToRead)
1227 {
1228 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1229
1230 pSeg->pTask = pTask;
1231 pSeg->uBufOffset = uBufOffset;
1232 pSeg->cbTransfer = RT_MIN(cbToRead, cbSegLeft);
1233 pSeg->pvBuf = pbSegBuf;
1234
1235 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1236
1237 pdmacFileEpCacheEntryAddWaitingSegment(pEntryNew, pSeg);
1238
1239 off += pSeg->cbTransfer;
1240 cbToRead -= pSeg->cbTransfer;
1241 uBufOffset += pSeg->cbTransfer;
1242 }
1243
1244 pdmacFileCacheReadFromEndpoint(pEntryNew);
1245 pdmacFileEpCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
1246 }
1247 else
1248 {
1249 /*
1250 * There is not enough free space in the cache.
1251 * Pass the request directly to the I/O manager.
1252 */
1253 LogFlow(("Couldn't evict %u bytes from the cache (%u actually removed). Remaining request will be passed through\n", cbToRead, cbRemoved));
1254
1255 while (cbToRead)
1256 {
1257 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEndpoint);
1258 AssertPtr(pIoTask);
1259
1260 pIoTask->pEndpoint = pEndpoint;
1261 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_READ;
1262 pIoTask->Off = off;
1263 pIoTask->DataSeg.cbSeg = RT_MIN(cbToRead, cbSegLeft);
1264 pIoTask->DataSeg.pvSeg = pbSegBuf;
1265 pIoTask->pvUser = pTask;
1266 pIoTask->pfnCompleted = pdmacFileEpTaskCompleted;
1267
1268 off += pIoTask->DataSeg.cbSeg;
1269 cbToRead -= pIoTask->DataSeg.cbSeg;
1270
1271 ADVANCE_SEGMENT_BUFFER(pIoTask->DataSeg.cbSeg);
1272
1273 /* Send it off to the I/O manager. */
1274 pdmacFileEpAddTask(pEndpoint, pIoTask);
1275 }
1276 }
1277 }
1278 }
1279
1280 ASMAtomicWriteBool(&pTask->fCompleted, false);
1281
1282 if (ASMAtomicReadS32(&pTask->cbTransferLeft) == 0
1283 && !ASMAtomicXchgBool(&pTask->fCompleted, true))
1284 pdmR3AsyncCompletionCompleteTask(&pTask->Core);
1285
1286 return rc;
1287}
1288
1289/**
1290 * Writes the given data to the endpoint using the cache if possible.
1291 *
1292 * @returns VBox status code.
1293 * @param pEndpoint The endpoint to write to.
1294 * @param pTask The task structure used as identifier for this request.
1295 * @param off The offset to start writing to
1296 * @param paSegments Pointer to the array holding the source buffers.
1297 * @param cSegments Number of segments in the array.
1298 * @param cbWrite Number of bytes to write.
1299 */
1300int pdmacFileEpCacheWrite(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
1301 RTFOFF off, PCPDMDATASEG paSegments, size_t cSegments,
1302 size_t cbWrite)
1303{
1304 int rc = VINF_SUCCESS;
1305 PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;
1306 PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
1307 PPDMACFILECACHEENTRY pEntry;
1308
1309 LogFlowFunc((": pEndpoint=%#p{%s} pTask=%#p off=%RTfoff paSegments=%#p cSegments=%u cbWrite=%u\n",
1310 pEndpoint, pEndpoint->Core.pszUri, pTask, off, paSegments, cSegments, cbWrite));
1311
1312 pTask->cbTransferLeft = cbWrite;
1313 /* Set to completed to make sure that the task is valid while we access it. */
1314 ASMAtomicWriteBool(&pTask->fCompleted, true);
1315
1316 int iSegCurr = 0;
1317 uint8_t *pbSegBuf = (uint8_t *)paSegments[iSegCurr].pvSeg;
1318 size_t cbSegLeft = paSegments[iSegCurr].cbSeg;
1319
1320 while (cbWrite)
1321 {
1322 size_t cbToWrite;
1323
1324 pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);
1325
1326 if (pEntry)
1327 {
1328 /* Write the data into the entry and mark it as dirty */
1329 AssertPtr(pEntry->pList);
1330
1331 RTFOFF OffDiff = off - pEntry->Core.Key;
1332
1333 AssertMsg(off >= pEntry->Core.Key,
1334 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1335 off, pEntry->Core.Key));
1336
1337 cbToWrite = RT_MIN(pEntry->cbData - OffDiff, cbWrite);
1338 cbWrite -= cbToWrite;
1339
1340 if (!cbWrite)
1341 STAM_COUNTER_INC(&pCache->cHits);
1342 else
1343 STAM_COUNTER_INC(&pCache->cPartialHits);
1344
1345 STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
1346
1347 /* Ghost lists contain no data. */
1348 if ( (pEntry->pList == &pCache->LruRecentlyUsed)
1349 || (pEntry->pList == &pCache->LruFrequentlyUsed))
1350 {
1351 /* Check if the buffer is deprecated. */
1352 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1353 PDMACFILECACHE_ENTRY_IS_DEPRECATED,
1354 0))
1355 {
1356 AssertMsg(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1357 ("Entry is deprecated but not in progress\n"));
1358 AssertPtr(pEntry->pbDataReplace);
1359
1360 LogFlow(("Writing to deprecated buffer of entry %#p\n", pEntry));
1361
1362 /* Update the data from the write. */
1363 while (cbToWrite)
1364 {
1365 size_t cbCopy = RT_MIN(cbSegLeft, cbToWrite);
1366
1367 memcpy(pEntry->pbDataReplace + OffDiff, pbSegBuf, cbCopy);
1368
1369 ADVANCE_SEGMENT_BUFFER(cbCopy);
1370
1371 cbToWrite-= cbCopy;
1372 off += cbCopy;
1373 OffDiff += cbCopy;
1374 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1375 }
1376 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1377 }
1378 else
1379 {
1380 /* If the entry is dirty it must also be in progress now and we have to defer updating it again. */
1381 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1382 PDMACFILECACHE_ENTRY_IS_DIRTY,
1383 0))
1384 {
1385 AssertMsg(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1386 ("Entry is dirty but not in progress\n"));
1387 Assert(!pEntry->pbDataReplace);
1388
1389 /* Deprecate the current buffer. */
1390 if (!pEntry->pWaitingHead)
1391 pEntry->pbDataReplace = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
1392
1393 /* If we are out of memory or have waiting segments
1394 * defer the write. */
1395 if (!pEntry->pbDataReplace || pEntry->pWaitingHead)
1396 {
1397 /* The data isn't written to the file yet */
1398 while (cbToWrite)
1399 {
1400 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1401
1402 pSeg->pTask = pTask;
1403 pSeg->uBufOffset = OffDiff;
1404 pSeg->cbTransfer = RT_MIN(cbToWrite, cbSegLeft);
1405 pSeg->pvBuf = pbSegBuf;
1406 pSeg->fWrite = true;
1407
1408 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1409
1410 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1411
1412 off += pSeg->cbTransfer;
1413 OffDiff += pSeg->cbTransfer;
1414 cbToWrite -= pSeg->cbTransfer;
1415 }
1416 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
1417 }
1418 else
1419 {
1420 LogFlow(("Deprecating buffer for entry %#p\n", pEntry));
1421 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DEPRECATED;
1422
1423#if 1
1424 /* Copy the data before the update. */
1425 if (OffDiff)
1426 memcpy(pEntry->pbDataReplace, pEntry->pbData, OffDiff);
1427
1428 /* Copy data behind the update. */
1429 if ((pEntry->cbData - OffDiff - cbToWrite) > 0)
1430 memcpy(pEntry->pbDataReplace + OffDiff + cbToWrite,
1431 pEntry->pbData + OffDiff + cbToWrite,
1432 (pEntry->cbData - OffDiff - cbToWrite));
1433#else
1434 /* A safer method but probably slower. */
1435 memcpy(pEntry->pbDataReplace, pEntry->pbData, pEntry->cbData);
1436#endif
1437
1438 /* Update the data from the write. */
1439 while (cbToWrite)
1440 {
1441 size_t cbCopy = RT_MIN(cbSegLeft, cbToWrite);
1442
1443 memcpy(pEntry->pbDataReplace + OffDiff, pbSegBuf, cbCopy);
1444
1445 ADVANCE_SEGMENT_BUFFER(cbCopy);
1446
1447 cbToWrite-= cbCopy;
1448 off += cbCopy;
1449 OffDiff += cbCopy;
1450 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1451 }
1452
1453 /* We are done here. A new write is initiated when the current request completes. */
1454 }
1455
1456 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1457 }
1458 else
1459 {
1460 /*
1461 * Check if a read is in progress for this entry.
1462 * We have to defer processing in that case.
1463 */
1464 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
1465 PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
1466 0))
1467 {
1468 while (cbToWrite)
1469 {
1470 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1471
1472 pSeg->pTask = pTask;
1473 pSeg->uBufOffset = OffDiff;
1474 pSeg->cbTransfer = RT_MIN(cbToWrite, cbSegLeft);
1475 pSeg->pvBuf = pbSegBuf;
1476 pSeg->fWrite = true;
1477
1478 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1479
1480 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1481
1482 off += pSeg->cbTransfer;
1483 OffDiff += pSeg->cbTransfer;
1484 cbToWrite -= pSeg->cbTransfer;
1485 }
1486 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
1487 RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
1488 }
1489 else
1490 {
1491 /* Write as much as we can into the entry and update the file. */
1492 while (cbToWrite)
1493 {
1494 size_t cbCopy = RT_MIN(cbSegLeft, cbToWrite);
1495
1496 memcpy(pEntry->pbData + OffDiff, pbSegBuf, cbCopy);
1497
1498 ADVANCE_SEGMENT_BUFFER(cbCopy);
1499
1500 cbToWrite-= cbCopy;
1501 off += cbCopy;
1502 OffDiff += cbCopy;
1503 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1504 }
1505
1506 pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
1507 pdmacFileCacheWriteToEndpoint(pEntry);
1508 }
1509 }
1510
1511 /* Move this entry to the top position */
1512 RTCritSectEnter(&pCache->CritSect);
1513 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1514 RTCritSectLeave(&pCache->CritSect);
1515 }
1516 }
1517 else
1518 {
1519 RTCritSectEnter(&pCache->CritSect);
1520 pdmacFileCacheUpdate(pCache, pEntry);
1521 pdmacFileCacheReplace(pCache, pEntry->cbData, pEntry->pList);
1522
1523 /* Move the entry to T2 and fetch it to the cache. */
1524 pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
1525 RTCritSectLeave(&pCache->CritSect);
1526
1527 pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
1528 AssertPtr(pEntry->pbData);
1529
1530 while (cbToWrite)
1531 {
1532 PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
1533
1534 AssertMsg(off >= pEntry->Core.Key,
1535 ("Overflow in calculation off=%RTfoff OffsetAligned=%RTfoff\n",
1536 off, pEntry->Core.Key));
1537
1538 pSeg->pTask = pTask;
1539 pSeg->uBufOffset = OffDiff;
1540 pSeg->cbTransfer = RT_MIN(cbToWrite, cbSegLeft);
1541 pSeg->pvBuf = pbSegBuf;
1542 pSeg->fWrite = true;
1543
1544 ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
1545
1546 pdmacFileEpCacheEntryAddWaitingSegment(pEntry, pSeg);
1547
1548 off += pSeg->cbTransfer;
1549 OffDiff += pSeg->cbTransfer;
1550 cbToWrite -= pSeg->cbTransfer;
1551 }
1552
1553 STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
1554 pdmacFileCacheReadFromEndpoint(pEntry);
1555 }
1556
1557 /* Release the reference. If it is still needed the I/O in progress flag should protect it now. */
1558 pdmacFileEpCacheEntryRelease(pEntry);
1559 }
1560 else
1561 {
1562 /*
1563 * No entry found. Try to create a new cache entry to store the data in and if that fails
1564 * write directly to the file.
1565 */
1566 PPDMACFILECACHEENTRY pEntryBestFit = pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off);
1567
1568 LogFlow(("%sest fit entry for off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
1569 pEntryBestFit ? "B" : "No b",
1570 off,
1571 pEntryBestFit ? pEntryBestFit->Core.Key : 0,
1572 pEntryBestFit ? pEntryBestFit->Core.KeyLast : 0,
1573 pEntryBestFit ? pEntryBestFit->cbData : 0));
1574
1575 if (pEntryBestFit && ((off + (RTFOFF)cbWrite) > pEntryBestFit->Core.Key))
1576 {
1577 cbToWrite = pEntryBestFit->Core.Key - off;
1578 pdmacFileEpCacheEntryRelease(pEntryBestFit);
1579 }
1580 else
1581 cbToWrite = cbWrite;
1582
1583 cbWrite -= cbToWrite;
1584
1585 STAM_COUNTER_INC(&pCache->cMisses);
1586 STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
1587
1588 RTCritSectEnter(&pCache->CritSect);
1589 size_t cbRemoved = pdmacFileCacheEvict(pCache, cbToWrite);
1590 RTCritSectLeave(&pCache->CritSect);
1591
1592 if (cbRemoved >= cbToWrite)
1593 {
1594 uint8_t *pbBuf;
1595 PPDMACFILECACHEENTRY pEntryNew;
1596
1597 LogFlow(("Evicted %u bytes (%u requested). Creating new cache entry\n", cbRemoved, cbToWrite));
1598
1599 pEntryNew = pdmacFileCacheEntryAlloc(pCache, pEndpoint, off, cbToWrite);
1600 AssertPtr(pEntryNew);
1601
1602 RTCritSectEnter(&pCache->CritSect);
1603 pdmacFileCacheEntryAddToList(&pCache->LruRecentlyUsed, pEntryNew);
1604 pCache->cbCached += cbToWrite;
1605 RTCritSectLeave(&pCache->CritSect);
1606
1607 pdmacFileEpCacheInsertEntry(pEndpointCache, pEntryNew);
1608
1609 off += cbToWrite;
1610 pbBuf = pEntryNew->pbData;
1611
1612 while (cbToWrite)
1613 {
1614 size_t cbCopy = RT_MIN(cbSegLeft, cbToWrite);
1615
1616 memcpy(pbBuf, pbSegBuf, cbCopy);
1617
1618 ADVANCE_SEGMENT_BUFFER(cbCopy);
1619
1620 cbToWrite -= cbCopy;
1621 pbBuf += cbCopy;
1622 ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
1623 }
1624
1625 pEntryNew->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
1626 pdmacFileCacheWriteToEndpoint(pEntryNew);
1627 pdmacFileEpCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
1628 }
1629 else
1630 {
1631 /*
1632 * There is not enough free space in the cache.
1633 * Pass the request directly to the I/O manager.
1634 */
1635 LogFlow(("Couldn't evict %u bytes from the cache (%u actually removed). Remaining request will be passed through\n", cbToWrite, cbRemoved));
1636
1637 while (cbToWrite)
1638 {
1639 PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEndpoint);
1640 AssertPtr(pIoTask);
1641
1642 pIoTask->pEndpoint = pEndpoint;
1643 pIoTask->enmTransferType = PDMACTASKFILETRANSFER_WRITE;
1644 pIoTask->Off = off;
1645 pIoTask->DataSeg.cbSeg = RT_MIN(cbToWrite, cbSegLeft);
1646 pIoTask->DataSeg.pvSeg = pbSegBuf;
1647 pIoTask->pvUser = pTask;
1648 pIoTask->pfnCompleted = pdmacFileEpTaskCompleted;
1649
1650 off += pIoTask->DataSeg.cbSeg;
1651 cbToWrite -= pIoTask->DataSeg.cbSeg;
1652
1653 ADVANCE_SEGMENT_BUFFER(pIoTask->DataSeg.cbSeg);
1654
1655 /* Send it off to the I/O manager. */
1656 pdmacFileEpAddTask(pEndpoint, pIoTask);
1657 }
1658 }
1659 }
1660 }
1661
1662 ASMAtomicWriteBool(&pTask->fCompleted, false);
1663
1664 if (ASMAtomicReadS32(&pTask->cbTransferLeft) == 0
1665 && !ASMAtomicXchgBool(&pTask->fCompleted, true))
1666 pdmR3AsyncCompletionCompleteTask(&pTask->Core);
1667
1668 return VINF_SUCCESS;
1669}
1670
1671#undef ADVANCE_SEGMENT_BUFFER
1672
