VirtualBox

source: vbox/trunk/src/VBox/Storage/VD.cpp@ 65123

Last change on this file since 65123 was 64766, checked in by vboxsync, 8 years ago

src/VBox: Make the use of the iterator for RTListForEach()/RTListForEachSafe() more obvious. There is no need to initialize the iterator and we also must not depend on the iterator being NULL if the list was empty.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 397.0 KB
Line 
1/* $Id: VD.cpp 64766 2016-11-30 10:59:48Z vboxsync $ */
2/** @file
3 * VBoxHDD - VBox HDD Container implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD
23#include <VBox/vd.h>
24#include <VBox/err.h>
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#include <iprt/alloc.h>
29#include <iprt/assert.h>
30#include <iprt/uuid.h>
31#include <iprt/file.h>
32#include <iprt/string.h>
33#include <iprt/asm.h>
34#include <iprt/ldr.h>
35#include <iprt/dir.h>
36#include <iprt/path.h>
37#include <iprt/param.h>
38#include <iprt/memcache.h>
39#include <iprt/sg.h>
40#include <iprt/list.h>
41#include <iprt/avl.h>
42#include <iprt/semaphore.h>
43
44#include <VBox/vd-plugin.h>
45
46#include "VDBackends.h"
47
48/** Disable dynamic backends on non x86 architectures. This feature
49 * requires the SUPR3 library which is not available there.
50 */
51#if !defined(VBOX_HDD_NO_DYNAMIC_BACKENDS) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)
52# define VBOX_HDD_NO_DYNAMIC_BACKENDS
53#endif
54
55#define VBOXHDDDISK_SIGNATURE 0x6f0e2a7d
56
57/** Buffer size used for merging images. */
58#define VD_MERGE_BUFFER_SIZE (16 * _1M)
59
60/** Maximum number of segments in one I/O task. */
61#define VD_IO_TASK_SEGMENTS_MAX 64
62
63/** Threshold after not recently used blocks are removed from the list. */
64#define VD_DISCARD_REMOVE_THRESHOLD (10 * _1M) /** @todo experiment */
65
66/**
67 * VD async I/O interface storage descriptor.
68 */
69typedef struct VDIIOFALLBACKSTORAGE
70{
71 /** File handle. */
72 RTFILE File;
73 /** Completion callback. */
74 PFNVDCOMPLETED pfnCompleted;
75 /** Thread for async access. */
76 RTTHREAD ThreadAsync;
77} VDIIOFALLBACKSTORAGE, *PVDIIOFALLBACKSTORAGE;
78
79/**
80 * Structure containing everything I/O related
81 * for the image and cache descriptors.
82 */
83typedef struct VDIO
84{
85 /** I/O interface to the upper layer. */
86 PVDINTERFACEIO pInterfaceIo;
87
88 /** Per image internal I/O interface. */
89 VDINTERFACEIOINT VDIfIoInt;
90
91 /** Fallback I/O interface, only used if the caller doesn't provide it. */
92 VDINTERFACEIO VDIfIo;
93
94 /** Opaque backend data. */
95 void *pBackendData;
96 /** Disk this image is part of */
97 PVBOXHDD pDisk;
98 /** Flag whether to ignore flush requests. */
99 bool fIgnoreFlush;
100} VDIO, *PVDIO;
101
102/** Forward declaration of an I/O task */
103typedef struct VDIOTASK *PVDIOTASK;
104
105/**
106 * VBox HDD Container image descriptor.
107 */
108typedef struct VDIMAGE
109{
110 /** Link to parent image descriptor, if any. */
111 struct VDIMAGE *pPrev;
112 /** Link to child image descriptor, if any. */
113 struct VDIMAGE *pNext;
114 /** Container base filename. (UTF-8) */
115 char *pszFilename;
116 /** Data managed by the backend which keeps the actual info. */
117 void *pBackendData;
118 /** Cached sanitized image flags. */
119 unsigned uImageFlags;
120 /** Image open flags (only those handled generically in this code and which
121 * the backends will never ever see). */
122 unsigned uOpenFlags;
123
124 /** Function pointers for the various backend methods. */
125 PCVDIMAGEBACKEND Backend;
126 /** Pointer to list of VD interfaces, per-image. */
127 PVDINTERFACE pVDIfsImage;
128 /** I/O related things. */
129 VDIO VDIo;
130} VDIMAGE, *PVDIMAGE;
131
132/**
133 * uModified bit flags.
134 */
135#define VD_IMAGE_MODIFIED_FLAG RT_BIT(0)
136#define VD_IMAGE_MODIFIED_FIRST RT_BIT(1)
137#define VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE RT_BIT(2)
138
139
140/**
141 * VBox HDD Cache image descriptor.
142 */
143typedef struct VDCACHE
144{
145 /** Cache base filename. (UTF-8) */
146 char *pszFilename;
147 /** Data managed by the backend which keeps the actual info. */
148 void *pBackendData;
149 /** Cached sanitized image flags. */
150 unsigned uImageFlags;
151 /** Image open flags (only those handled generically in this code and which
152 * the backends will never ever see). */
153 unsigned uOpenFlags;
154
155 /** Function pointers for the various backend methods. */
156 PCVDCACHEBACKEND Backend;
157
158 /** Pointer to list of VD interfaces, per-cache. */
159 PVDINTERFACE pVDIfsCache;
160 /** I/O related things. */
161 VDIO VDIo;
162} VDCACHE, *PVDCACHE;
163
164/**
165 * A block waiting for a discard.
166 */
167typedef struct VDDISCARDBLOCK
168{
169 /** AVL core. */
170 AVLRU64NODECORE Core;
171 /** LRU list node. */
172 RTLISTNODE NodeLru;
173 /** Number of bytes to discard. */
174 size_t cbDiscard;
175 /** Bitmap of allocated sectors. */
176 void *pbmAllocated;
177} VDDISCARDBLOCK, *PVDDISCARDBLOCK;
178
/**
 * VD discard state.
 *
 * Tracks the blocks whose discard has been deferred, both in an AVL tree
 * (for range lookups) and in an LRU list (for eviction).
 */
typedef struct VDDISCARDSTATE
{
    /** Number of bytes waiting for a discard. */
    size_t cbDiscarding;
    /** AVL tree with blocks waiting for a discard.
     * The uOffset + cbDiscard range is the search key. */
    PAVLRU64TREE pTreeBlocks;
    /** LRU list of blocks waiting for a discard.
     * If there are too many blocks waiting, the least recently used
     * one will be removed and its range will be set to 0.
     */
    RTLISTNODE ListLru;
} VDDISCARDSTATE, *PVDDISCARDSTATE;
195
196/**
197 * VD filter instance.
198 */
199typedef struct VDFILTER
200{
201 /** List node for the read filter chain. */
202 RTLISTNODE ListNodeChainRead;
203 /** List node for the write filter chain. */
204 RTLISTNODE ListNodeChainWrite;
205 /** Number of references to this filter. */
206 uint32_t cRefs;
207 /** Opaque VD filter backend instance data. */
208 void *pvBackendData;
209 /** Pointer to the filter backend interface. */
210 PCVDFILTERBACKEND pBackend;
211 /** Pointer to list of VD interfaces, per-filter. */
212 PVDINTERFACE pVDIfsFilter;
213 /** I/O related things. */
214 VDIO VDIo;
215} VDFILTER;
216/** Pointer to a VD filter instance. */
217typedef VDFILTER *PVDFILTER;
218
219/**
220 * VBox HDD Container main structure, private part.
221 */
222struct VBOXHDD
223{
224 /** Structure signature (VBOXHDDDISK_SIGNATURE). */
225 uint32_t u32Signature;
226
227 /** Image type. */
228 VDTYPE enmType;
229
230 /** Number of opened images. */
231 unsigned cImages;
232
233 /** Base image. */
234 PVDIMAGE pBase;
235
236 /** Last opened image in the chain.
237 * The same as pBase if only one image is used. */
238 PVDIMAGE pLast;
239
240 /** If a merge to one of the parents is running this may be non-NULL
241 * to indicate to what image the writes should be additionally relayed. */
242 PVDIMAGE pImageRelay;
243
244 /** Flags representing the modification state. */
245 unsigned uModified;
246
247 /** Cached size of this disk. */
248 uint64_t cbSize;
249 /** Cached PCHS geometry for this disk. */
250 VDGEOMETRY PCHSGeometry;
251 /** Cached LCHS geometry for this disk. */
252 VDGEOMETRY LCHSGeometry;
253
254 /** Pointer to list of VD interfaces, per-disk. */
255 PVDINTERFACE pVDIfsDisk;
256 /** Pointer to the common interface structure for error reporting. */
257 PVDINTERFACEERROR pInterfaceError;
258 /** Pointer to the optional thread synchronization callbacks. */
259 PVDINTERFACETHREADSYNC pInterfaceThreadSync;
260
261 /** Memory cache for I/O contexts */
262 RTMEMCACHE hMemCacheIoCtx;
263 /** Memory cache for I/O tasks. */
264 RTMEMCACHE hMemCacheIoTask;
265 /** An I/O context is currently using the disk structures
266 * Every I/O context must be placed on one of the lists below. */
267 volatile bool fLocked;
268 /** Head of pending I/O tasks waiting for completion - LIFO order. */
269 volatile PVDIOTASK pIoTasksPendingHead;
270 /** Head of newly queued I/O contexts - LIFO order. */
271 volatile PVDIOCTX pIoCtxHead;
272 /** Head of halted I/O contexts which are given back to generic
273 * disk framework by the backend. - LIFO order. */
274 volatile PVDIOCTX pIoCtxHaltedHead;
275
276 /** Head of blocked I/O contexts, processed only
277 * after pIoCtxLockOwner was freed - LIFO order. */
278 volatile PVDIOCTX pIoCtxBlockedHead;
279 /** I/O context which locked the disk for a growing write or flush request.
280 * Other flush or growing write requests need to wait until
281 * the current one completes. - NIL_VDIOCTX if unlocked. */
282 volatile PVDIOCTX pIoCtxLockOwner;
283 /** If the disk was locked by a growing write, flush or discard request this
284 * contains the start offset to check for interfering I/O while it is in progress. */
285 uint64_t uOffsetStartLocked;
286 /** If the disk was locked by a growing write, flush or discard request this contains
287 * the first non affected offset to check for interfering I/O while it is in progress. */
288 uint64_t uOffsetEndLocked;
289
290 /** Pointer to the L2 disk cache if any. */
291 PVDCACHE pCache;
292 /** Pointer to the discard state if any. */
293 PVDDISCARDSTATE pDiscard;
294
295 /** Read filter chain - PVDFILTER. */
296 RTLISTANCHOR ListFilterChainRead;
297 /** Write filter chain - PVDFILTER. */
298 RTLISTANCHOR ListFilterChainWrite;
299};
300
301# define VD_IS_LOCKED(a_pDisk) \
302 do \
303 { \
304 NOREF(a_pDisk); \
305 AssertMsg((a_pDisk)->fLocked, \
306 ("Lock not held\n"));\
307 } while(0)
308
309/**
310 * VBox parent read descriptor, used internally for compaction.
311 */
312typedef struct VDPARENTSTATEDESC
313{
314 /** Pointer to disk descriptor. */
315 PVBOXHDD pDisk;
316 /** Pointer to image descriptor. */
317 PVDIMAGE pImage;
318} VDPARENTSTATEDESC, *PVDPARENTSTATEDESC;
319
320/**
321 * Transfer direction.
322 */
323typedef enum VDIOCTXTXDIR
324{
325 /** Read */
326 VDIOCTXTXDIR_READ = 0,
327 /** Write */
328 VDIOCTXTXDIR_WRITE,
329 /** Flush */
330 VDIOCTXTXDIR_FLUSH,
331 /** Discard */
332 VDIOCTXTXDIR_DISCARD,
333 /** 32bit hack */
334 VDIOCTXTXDIR_32BIT_HACK = 0x7fffffff
335} VDIOCTXTXDIR, *PVDIOCTXTXDIR;
336
337/** Transfer function */
338typedef DECLCALLBACK(int) FNVDIOCTXTRANSFER (PVDIOCTX pIoCtx);
339/** Pointer to a transfer function. */
340typedef FNVDIOCTXTRANSFER *PFNVDIOCTXTRANSFER;
341
/**
 * I/O context.
 *
 * Describes one read/write/flush/discard request as it travels through the
 * image chain and the filter chains.
 */
typedef struct VDIOCTX
{
    /** Pointer to the next I/O context. */
    struct VDIOCTX * volatile pIoCtxNext;
    /** Disk this request is for. */
    PVBOXHDD pDisk;
    /** Return code. */
    int rcReq;
    /** Various flags for the I/O context (VDIOCTX_FLAGS_XXX). */
    uint32_t fFlags;
    /** Number of data transfers currently pending. */
    volatile uint32_t cDataTransfersPending;
    /** How many meta data transfers are pending. */
    volatile uint32_t cMetaTransfersPending;
    /** Flag whether the request finished */
    volatile bool fComplete;
    /** Temporary allocated memory which is freed
     * when the context completes. */
    void *pvAllocation;
    /** Transfer function. */
    PFNVDIOCTXTRANSFER pfnIoCtxTransfer;
    /** Next transfer part after the current one completed. */
    PFNVDIOCTXTRANSFER pfnIoCtxTransferNext;
    /** Transfer direction */
    VDIOCTXTXDIR enmTxDir;
    /** Request type dependent data. */
    union
    {
        /** I/O request (read/write). */
        struct
        {
            /** Number of bytes left until this context completes. */
            volatile uint32_t cbTransferLeft;
            /** Current offset */
            volatile uint64_t uOffset;
            /** Number of bytes to transfer */
            volatile size_t cbTransfer;
            /** Current image in the chain. */
            PVDIMAGE pImageCur;
            /** Start image to read from. pImageCur is reset to this
             * value after it reached the first image in the chain. */
            PVDIMAGE pImageStart;
            /** S/G buffer */
            RTSGBUF SgBuf;
            /** Number of bytes to clear in the buffer before the current read. */
            size_t cbBufClear;
            /** Number of images to read. */
            unsigned cImagesRead;
            /** Override for the parent image to start reading from. */
            PVDIMAGE pImageParentOverride;
            /** Original offset of the transfer - required for filtering read requests. */
            uint64_t uOffsetXferOrig;
            /** Original size of the transfer - required for filtering read requests. */
            size_t cbXferOrig;
        } Io;
        /** Discard requests. */
        struct
        {
            /** Pointer to the range descriptor array. */
            PCRTRANGE paRanges;
            /** Number of ranges in the array. */
            unsigned cRanges;
            /** Range descriptor index which is processed. */
            unsigned idxRange;
            /** Start offset to discard currently. */
            uint64_t offCur;
            /** How many bytes left to discard in the current range. */
            size_t cbDiscardLeft;
            /** How many bytes to discard in the current block (<= cbDiscardLeft). */
            size_t cbThisDiscard;
            /** Discard block handled currently. */
            PVDDISCARDBLOCK pBlock;
        } Discard;
    } Req;
    /** Parent I/O context if any. Sets the type of the context (root/child) */
    PVDIOCTX pIoCtxParent;
    /** Type dependent data (root/child) */
    union
    {
        /** Root data */
        struct
        {
            /** Completion callback */
            PFNVDASYNCTRANSFERCOMPLETE pfnComplete;
            /** User argument 1 passed on completion. */
            void *pvUser1;
            /** User argument 2 passed on completion. */
            void *pvUser2;
        } Root;
        /** Child data */
        struct
        {
            /** Saved start offset */
            uint64_t uOffsetSaved;
            /** Saved transfer size */
            size_t cbTransferLeftSaved;
            /** Number of bytes transferred from the parent if this context completes. */
            size_t cbTransferParent;
            /** Number of bytes to pre read */
            size_t cbPreRead;
            /** Number of bytes to post read. */
            size_t cbPostRead;
            /** Number of bytes to write left in the parent. */
            size_t cbWriteParent;
            /** Write type dependent data. */
            union
            {
                /** Optimized */
                struct
                {
                    /** Bytes to fill to satisfy the block size. Not part of the virtual disk. */
                    size_t cbFill;
                    /** Bytes to copy instead of reading from the parent */
                    size_t cbWriteCopy;
                    /** Bytes to read from the image. */
                    size_t cbReadImage;
                } Optimized;
            } Write;
        } Child;
    } Type;
} VDIOCTX;
466
/** Default flags for an I/O context, i.e. unblocked and async. */
#define VDIOCTX_FLAGS_DEFAULT (0)
/** Flag whether the context is blocked. */
#define VDIOCTX_FLAGS_BLOCKED RT_BIT_32(0)
/** Flag whether the I/O context is using synchronous I/O. */
#define VDIOCTX_FLAGS_SYNC RT_BIT_32(1)
/** Flag whether the read should update the cache. */
#define VDIOCTX_FLAGS_READ_UPDATE_CACHE RT_BIT_32(2)
/** Flag whether free blocks should be zeroed.
 * If false and no image has data for the specified
 * range VERR_VD_BLOCK_FREE is returned for the I/O context.
 * Note that unallocated blocks are still zeroed
 * if at least one image has valid data for a part
 * of the range.
 */
#define VDIOCTX_FLAGS_ZERO_FREE_BLOCKS RT_BIT_32(3)
/** Don't free the I/O context when complete because
 * it was allocated elsewhere (stack, ...). */
#define VDIOCTX_FLAGS_DONT_FREE RT_BIT_32(4)
/** Don't set the modified flag for this I/O context when writing. */
#define VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG RT_BIT_32(5)
/** The write filter was applied already and shouldn't be applied a second time.
 * Used at the beginning of vdWriteHelperAsync() because it might be called
 * multiple times.
 */
#define VDIOCTX_FLAGS_WRITE_FILTER_APPLIED RT_BIT_32(6)

/** NIL I/O context pointer value. */
#define NIL_VDIOCTX ((PVDIOCTX)0)
496
497/**
498 * List node for deferred I/O contexts.
499 */
500typedef struct VDIOCTXDEFERRED
501{
502 /** Node in the list of deferred requests.
503 * A request can be deferred if the image is growing
504 * and the request accesses the same range or if
505 * the backend needs to read or write metadata from the disk
506 * before it can continue. */
507 RTLISTNODE NodeDeferred;
508 /** I/O context this entry points to. */
509 PVDIOCTX pIoCtx;
510} VDIOCTXDEFERRED, *PVDIOCTXDEFERRED;
511
512/**
513 * I/O task.
514 */
515typedef struct VDIOTASK
516{
517 /** Next I/O task waiting in the list. */
518 struct VDIOTASK * volatile pNext;
519 /** Storage this task belongs to. */
520 PVDIOSTORAGE pIoStorage;
521 /** Optional completion callback. */
522 PFNVDXFERCOMPLETED pfnComplete;
523 /** Opaque user data. */
524 void *pvUser;
525 /** Completion status code for the task. */
526 int rcReq;
527 /** Flag whether this is a meta data transfer. */
528 bool fMeta;
529 /** Type dependent data. */
530 union
531 {
532 /** User data transfer. */
533 struct
534 {
535 /** Number of bytes this task transferred. */
536 uint32_t cbTransfer;
537 /** Pointer to the I/O context the task belongs. */
538 PVDIOCTX pIoCtx;
539 } User;
540 /** Meta data transfer. */
541 struct
542 {
543 /** Meta transfer this task is for. */
544 PVDMETAXFER pMetaXfer;
545 } Meta;
546 } Type;
547} VDIOTASK;
548
549/**
550 * Storage handle.
551 */
552typedef struct VDIOSTORAGE
553{
554 /** Image I/O state this storage handle belongs to. */
555 PVDIO pVDIo;
556 /** AVL tree for pending async metadata transfers. */
557 PAVLRFOFFTREE pTreeMetaXfers;
558 /** Storage handle */
559 void *pStorage;
560} VDIOSTORAGE;
561
562/**
563 * Metadata transfer.
564 *
565 * @note This entry can't be freed if either the list is not empty or
566 * the reference counter is not 0.
567 * The assumption is that the backends don't need to read huge amounts of
568 * metadata to complete a transfer so the additional memory overhead should
569 * be relatively small.
570 */
571typedef struct VDMETAXFER
572{
573 /** AVL core for fast search (the file offset is the key) */
574 AVLRFOFFNODECORE Core;
575 /** I/O storage for this transfer. */
576 PVDIOSTORAGE pIoStorage;
577 /** Flags. */
578 uint32_t fFlags;
579 /** List of I/O contexts waiting for this metadata transfer to complete. */
580 RTLISTNODE ListIoCtxWaiting;
581 /** Number of references to this entry. */
582 unsigned cRefs;
583 /** Size of the data stored with this entry. */
584 size_t cbMeta;
585 /** Shadow buffer which is used in case a write is still active and other
586 * writes update the shadow buffer. */
587 uint8_t *pbDataShw;
588 /** List of I/O contexts updating the shadow buffer while there is a write
589 * in progress. */
590 RTLISTNODE ListIoCtxShwWrites;
591 /** Data stored - variable size. */
592 uint8_t abData[1];
593} VDMETAXFER;
594
595/**
596 * The transfer direction for the metadata.
597 */
598#define VDMETAXFER_TXDIR_MASK 0x3
599#define VDMETAXFER_TXDIR_NONE 0x0
600#define VDMETAXFER_TXDIR_WRITE 0x1
601#define VDMETAXFER_TXDIR_READ 0x2
602#define VDMETAXFER_TXDIR_FLUSH 0x3
603#define VDMETAXFER_TXDIR_GET(flags) ((flags) & VDMETAXFER_TXDIR_MASK)
604#define VDMETAXFER_TXDIR_SET(flags, dir) ((flags) = (flags & ~VDMETAXFER_TXDIR_MASK) | (dir))
605
606/**
607 * Plugin structure.
608 */
609typedef struct VDPLUGIN
610{
611 /** Pointer to the next plugin structure. */
612 RTLISTNODE NodePlugin;
613 /** Handle of loaded plugin library. */
614 RTLDRMOD hPlugin;
615 /** Filename of the loaded plugin. */
616 char *pszFilename;
617} VDPLUGIN;
618/** Pointer to a plugin structure. */
619typedef VDPLUGIN *PVDPLUGIN;
620
621/** Head of loaded plugin list. */
622static RTLISTANCHOR g_ListPluginsLoaded;
623
624/** Number of image backends supported. */
625static unsigned g_cBackends = 0;
626/** Array of pointers to the image backends. */
627static PCVDIMAGEBACKEND *g_apBackends = NULL;
628/** Array of handles to the corresponding plugin. */
629static RTLDRMOD *g_ahBackendPlugins = NULL;
630/** Builtin image backends. */
631static PCVDIMAGEBACKEND aStaticBackends[] =
632{
633 &g_VmdkBackend,
634 &g_VDIBackend,
635 &g_VhdBackend,
636 &g_ParallelsBackend,
637 &g_DmgBackend,
638 &g_QedBackend,
639 &g_QCowBackend,
640 &g_VhdxBackend,
641 &g_RawBackend,
642 &g_ISCSIBackend
643};
644
645/** Number of supported cache backends. */
646static unsigned g_cCacheBackends = 0;
647/** Array of pointers to the cache backends. */
648static PCVDCACHEBACKEND *g_apCacheBackends = NULL;
649/** Array of handles to the corresponding plugin. */
650static RTLDRMOD *g_ahCacheBackendPlugins = NULL;
651/** Builtin cache backends. */
652static PCVDCACHEBACKEND aStaticCacheBackends[] =
653{
654 &g_VciCacheBackend
655};
656
657/** Number of supported filter backends. */
658static unsigned g_cFilterBackends = 0;
659/** Array of pointers to the filters backends. */
660static PCVDFILTERBACKEND *g_apFilterBackends = NULL;
661#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
662/** Array of handles to the corresponding plugin. */
663static PRTLDRMOD g_pahFilterBackendPlugins = NULL;
664#endif
665
666/** Forward declaration of the async discard helper. */
667static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx);
668static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx);
669static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk);
670static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc);
671static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq);
672
673/**
674 * internal: add several backends.
675 */
676static int vdAddBackends(RTLDRMOD hPlugin, PCVDIMAGEBACKEND *ppBackends, unsigned cBackends)
677{
678 PCVDIMAGEBACKEND *pTmp = (PCVDIMAGEBACKEND *)RTMemRealloc(g_apBackends,
679 (g_cBackends + cBackends) * sizeof(PCVDIMAGEBACKEND));
680 if (RT_UNLIKELY(!pTmp))
681 return VERR_NO_MEMORY;
682 g_apBackends = pTmp;
683
684 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahBackendPlugins,
685 (g_cBackends + cBackends) * sizeof(RTLDRMOD));
686 if (RT_UNLIKELY(!pTmpPlugins))
687 return VERR_NO_MEMORY;
688 g_ahBackendPlugins = pTmpPlugins;
689 memcpy(&g_apBackends[g_cBackends], ppBackends, cBackends * sizeof(PCVDIMAGEBACKEND));
690 for (unsigned i = g_cBackends; i < g_cBackends + cBackends; i++)
691 g_ahBackendPlugins[i] = hPlugin;
692 g_cBackends += cBackends;
693 return VINF_SUCCESS;
694}
695
696#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
/**
 * internal: add single backend.
 *
 * Convenience wrapper around vdAddBackends() for exactly one backend.
 *
 * @returns VBox status code.
 * @param   hPlugin   Handle of the plugin the backend originates from.
 * @param   pBackend  The image backend to add.
 */
DECLINLINE(int) vdAddBackend(RTLDRMOD hPlugin, PCVDIMAGEBACKEND pBackend)
{
    return vdAddBackends(hPlugin, &pBackend, 1);
}
704#endif
705
706/**
707 * internal: add several cache backends.
708 */
709static int vdAddCacheBackends(RTLDRMOD hPlugin, PCVDCACHEBACKEND *ppBackends, unsigned cBackends)
710{
711 PCVDCACHEBACKEND *pTmp = (PCVDCACHEBACKEND*)RTMemRealloc(g_apCacheBackends,
712 (g_cCacheBackends + cBackends) * sizeof(PCVDCACHEBACKEND));
713 if (RT_UNLIKELY(!pTmp))
714 return VERR_NO_MEMORY;
715 g_apCacheBackends = pTmp;
716
717 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahCacheBackendPlugins,
718 (g_cCacheBackends + cBackends) * sizeof(RTLDRMOD));
719 if (RT_UNLIKELY(!pTmpPlugins))
720 return VERR_NO_MEMORY;
721 g_ahCacheBackendPlugins = pTmpPlugins;
722 memcpy(&g_apCacheBackends[g_cCacheBackends], ppBackends, cBackends * sizeof(PCVDCACHEBACKEND));
723 for (unsigned i = g_cCacheBackends; i < g_cCacheBackends + cBackends; i++)
724 g_ahCacheBackendPlugins[i] = hPlugin;
725 g_cCacheBackends += cBackends;
726 return VINF_SUCCESS;
727}
728
729#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
730
/**
 * internal: add single cache backend.
 *
 * Convenience wrapper around vdAddCacheBackends() for exactly one backend.
 *
 * @returns VBox status code.
 * @param   hPlugin   Handle of the plugin the backend originates from.
 * @param   pBackend  The cache backend to add.
 */
DECLINLINE(int) vdAddCacheBackend(RTLDRMOD hPlugin, PCVDCACHEBACKEND pBackend)
{
    return vdAddCacheBackends(hPlugin, &pBackend, 1);
}
738
739
740/**
741 * Add several filter backends.
742 *
743 * @returns VBox status code.
744 * @param hPlugin Plugin handle to add.
745 * @param ppBackends Array of filter backends to add.
746 * @param cBackends Number of backends to add.
747 */
748static int vdAddFilterBackends(RTLDRMOD hPlugin, PCVDFILTERBACKEND *ppBackends, unsigned cBackends)
749{
750 PCVDFILTERBACKEND *pTmp = (PCVDFILTERBACKEND *)RTMemRealloc(g_apFilterBackends,
751 (g_cFilterBackends + cBackends) * sizeof(PCVDFILTERBACKEND));
752 if (RT_UNLIKELY(!pTmp))
753 return VERR_NO_MEMORY;
754 g_apFilterBackends = pTmp;
755
756 PRTLDRMOD pTmpPlugins = (PRTLDRMOD)RTMemRealloc(g_pahFilterBackendPlugins,
757 (g_cFilterBackends + cBackends) * sizeof(RTLDRMOD));
758 if (RT_UNLIKELY(!pTmpPlugins))
759 return VERR_NO_MEMORY;
760
761 g_pahFilterBackendPlugins = pTmpPlugins;
762 memcpy(&g_apFilterBackends[g_cFilterBackends], ppBackends, cBackends * sizeof(PCVDFILTERBACKEND));
763 for (unsigned i = g_cFilterBackends; i < g_cFilterBackends + cBackends; i++)
764 g_pahFilterBackendPlugins[i] = hPlugin;
765 g_cFilterBackends += cBackends;
766 return VINF_SUCCESS;
767}
768
769
/**
 * Add a single filter backend to the list of supported filters.
 *
 * Convenience wrapper around vdAddFilterBackends() for exactly one backend.
 *
 * @returns VBox status code.
 * @param   hPlugin    Plugin handle to add.
 * @param   pBackend   The backend to add.
 */
DECLINLINE(int) vdAddFilterBackend(RTLDRMOD hPlugin, PCVDFILTERBACKEND pBackend)
{
    return vdAddFilterBackends(hPlugin, &pBackend, 1);
}
781
782#endif /* VBOX_HDD_NO_DYNAMIC_BACKENDS*/
783
/**
 * internal: issue error message.
 *
 * Forwards the message to the per-disk error interface if one is registered
 * and returns @a rc unchanged, so callers can use it directly in a return
 * statement.
 *
 * @returns rc.
 * @param   pDisk      The disk container.
 * @param   rc         VBox status code to report and pass through.
 * @param   pszFormat  Format string for the error message.
 * @param   ...        Format arguments.
 */
static int vdError(PVBOXHDD pDisk, int rc, RT_SRC_POS_DECL,
                   const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    if (pDisk->pInterfaceError)
        pDisk->pInterfaceError->pfnError(pDisk->pInterfaceError->Core.pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
    return rc;
}
797
798/**
799 * internal: thread synchronization, start read.
800 */
801DECLINLINE(int) vdThreadStartRead(PVBOXHDD pDisk)
802{
803 int rc = VINF_SUCCESS;
804 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
805 rc = pDisk->pInterfaceThreadSync->pfnStartRead(pDisk->pInterfaceThreadSync->Core.pvUser);
806 return rc;
807}
808
809/**
810 * internal: thread synchronization, finish read.
811 */
812DECLINLINE(int) vdThreadFinishRead(PVBOXHDD pDisk)
813{
814 int rc = VINF_SUCCESS;
815 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
816 rc = pDisk->pInterfaceThreadSync->pfnFinishRead(pDisk->pInterfaceThreadSync->Core.pvUser);
817 return rc;
818}
819
820/**
821 * internal: thread synchronization, start write.
822 */
823DECLINLINE(int) vdThreadStartWrite(PVBOXHDD pDisk)
824{
825 int rc = VINF_SUCCESS;
826 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
827 rc = pDisk->pInterfaceThreadSync->pfnStartWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
828 return rc;
829}
830
831/**
832 * internal: thread synchronization, finish write.
833 */
834DECLINLINE(int) vdThreadFinishWrite(PVBOXHDD pDisk)
835{
836 int rc = VINF_SUCCESS;
837 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
838 rc = pDisk->pInterfaceThreadSync->pfnFinishWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
839 return rc;
840}
841
842/**
843 * internal: find image format backend.
844 */
845static int vdFindBackend(const char *pszBackend, PCVDIMAGEBACKEND *ppBackend)
846{
847 int rc = VINF_SUCCESS;
848 PCVDIMAGEBACKEND pBackend = NULL;
849
850 if (!g_apBackends)
851 VDInit();
852
853 for (unsigned i = 0; i < g_cBackends; i++)
854 {
855 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
856 {
857 pBackend = g_apBackends[i];
858 break;
859 }
860 }
861 *ppBackend = pBackend;
862 return rc;
863}
864
865/**
866 * internal: find cache format backend.
867 */
868static int vdFindCacheBackend(const char *pszBackend, PCVDCACHEBACKEND *ppBackend)
869{
870 int rc = VINF_SUCCESS;
871 PCVDCACHEBACKEND pBackend = NULL;
872
873 if (!g_apCacheBackends)
874 VDInit();
875
876 for (unsigned i = 0; i < g_cCacheBackends; i++)
877 {
878 if (!RTStrICmp(pszBackend, g_apCacheBackends[i]->pszBackendName))
879 {
880 pBackend = g_apCacheBackends[i];
881 break;
882 }
883 }
884 *ppBackend = pBackend;
885 return rc;
886}
887
888/**
889 * internal: find filter backend.
890 */
891static int vdFindFilterBackend(const char *pszFilter, PCVDFILTERBACKEND *ppBackend)
892{
893 int rc = VINF_SUCCESS;
894 PCVDFILTERBACKEND pBackend = NULL;
895
896 for (unsigned i = 0; i < g_cFilterBackends; i++)
897 {
898 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
899 {
900 pBackend = g_apFilterBackends[i];
901 break;
902 }
903 }
904 *ppBackend = pBackend;
905 return rc;
906}
907
908
909/**
910 * internal: add image structure to the end of images list.
911 */
912static void vdAddImageToList(PVBOXHDD pDisk, PVDIMAGE pImage)
913{
914 pImage->pPrev = NULL;
915 pImage->pNext = NULL;
916
917 if (pDisk->pBase)
918 {
919 Assert(pDisk->cImages > 0);
920 pImage->pPrev = pDisk->pLast;
921 pDisk->pLast->pNext = pImage;
922 pDisk->pLast = pImage;
923 }
924 else
925 {
926 Assert(pDisk->cImages == 0);
927 pDisk->pBase = pImage;
928 pDisk->pLast = pImage;
929 }
930
931 pDisk->cImages++;
932}
933
934/**
935 * internal: remove image structure from the images list.
936 */
937static void vdRemoveImageFromList(PVBOXHDD pDisk, PVDIMAGE pImage)
938{
939 Assert(pDisk->cImages > 0);
940
941 if (pImage->pPrev)
942 pImage->pPrev->pNext = pImage->pNext;
943 else
944 pDisk->pBase = pImage->pNext;
945
946 if (pImage->pNext)
947 pImage->pNext->pPrev = pImage->pPrev;
948 else
949 pDisk->pLast = pImage->pPrev;
950
951 pImage->pPrev = NULL;
952 pImage->pNext = NULL;
953
954 pDisk->cImages--;
955}
956
/**
 * Release a reference to the filter decrementing the counter and destroying the filter
 * when the counter reaches zero.
 *
 * @returns The new reference count.
 * @param   pFilter    The filter to release.
 */
static uint32_t vdFilterRelease(PVDFILTER pFilter)
{
    uint32_t cRefs = ASMAtomicDecU32(&pFilter->cRefs);
    if (!cRefs)
    {
        /* Last reference dropped: let the backend destroy its instance data
         * before freeing the filter structure itself. */
        pFilter->pBackend->pfnDestroy(pFilter->pvBackendData);
        RTMemFree(pFilter);
    }

    return cRefs;
}
975
/**
 * Increments the reference counter of the given filter.
 *
 * Counterpart to vdFilterRelease().
 *
 * @return  The new reference count.
 * @param   pFilter    The filter.
 */
static uint32_t vdFilterRetain(PVDFILTER pFilter)
{
    return ASMAtomicIncU32(&pFilter->cRefs);
}
986
987/**
988 * internal: find image by index into the images list.
989 */
990static PVDIMAGE vdGetImageByNumber(PVBOXHDD pDisk, unsigned nImage)
991{
992 PVDIMAGE pImage = pDisk->pBase;
993 if (nImage == VD_LAST_IMAGE)
994 return pDisk->pLast;
995 while (pImage && nImage)
996 {
997 pImage = pImage->pNext;
998 nImage--;
999 }
1000 return pImage;
1001}
1002
/**
 * Applies the filter chain to the given write request.
 *
 * Stops at the first filter returning a failure status.
 *
 * @returns VBox status code.
 * @param   pDisk    The HDD container.
 * @param   uOffset  The start offset of the write.
 * @param   cbWrite  Number of bytes to write.
 * @param   pIoCtx   The I/O context associated with the request.
 */
static int vdFilterChainApplyWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
                                   PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /* Iterator is assigned by RTListForEach and must not be pre-initialized
     * or relied upon after an empty iteration (see r64766). */
    PVDFILTER pFilter;
    RTListForEach(&pDisk->ListFilterChainWrite, pFilter, VDFILTER, ListNodeChainWrite)
    {
        rc = pFilter->pBackend->pfnFilterWrite(pFilter->pvBackendData, uOffset, cbWrite, pIoCtx);
        if (RT_FAILURE(rc))
            break;
        /* Reset S/G buffer for the next filter. */
        RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    }

    return rc;
}
1031
/**
 * Applies the filter chain to the given read request.
 *
 * Stops at the first filter returning a failure status.
 *
 * @returns VBox status code.
 * @param   pDisk    The HDD container.
 * @param   uOffset  The start offset of the read.
 * @param   cbRead   Number of bytes read.
 * @param   pIoCtx   The I/O context associated with the request.
 */
static int vdFilterChainApplyRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
                                  PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /* Reset buffer before starting. */
    RTSgBufReset(&pIoCtx->Req.Io.SgBuf);

    /* Iterator is assigned by RTListForEach and must not be pre-initialized
     * or relied upon after an empty iteration (see r64766). */
    PVDFILTER pFilter;
    RTListForEach(&pDisk->ListFilterChainRead, pFilter, VDFILTER, ListNodeChainRead)
    {
        rc = pFilter->pBackend->pfnFilterRead(pFilter->pvBackendData, uOffset, cbRead, pIoCtx);
        if (RT_FAILURE(rc))
            break;
        /* Reset S/G buffer for the next filter. */
        RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    }

    return rc;
}
1063
/**
 * Completes a root I/O context by invoking its completion callback.
 *
 * For successfully finished reads the read filter chain is applied to the
 * transferred data first; a filter failure overwrites the request status
 * that is passed to the completion callback.
 *
 * @param   pDisk    The disk container.
 * @param   pIoCtx   The root I/O context to complete.
 */
DECLINLINE(void) vdIoCtxRootComplete(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    if (   RT_SUCCESS(pIoCtx->rcReq)
        && pIoCtx->enmTxDir == VDIOCTXTXDIR_READ)
        pIoCtx->rcReq = vdFilterChainApplyRead(pDisk, pIoCtx->Req.Io.uOffsetXferOrig,
                                               pIoCtx->Req.Io.cbXferOrig, pIoCtx);

    pIoCtx->Type.Root.pfnComplete(pIoCtx->Type.Root.pvUser1,
                                  pIoCtx->Type.Root.pvUser2,
                                  pIoCtx->rcReq);
}
1075
/**
 * Initialize the structure members of a given I/O context.
 *
 * @param   pIoCtx            The I/O context to initialize.
 * @param   pDisk             The disk container the request belongs to.
 * @param   enmTxDir          Transfer direction (read/write/flush/discard).
 * @param   uOffset           Start offset of the transfer.
 * @param   cbTransfer        Number of bytes to transfer.
 * @param   pImageStart       Image to start the transfer with.
 * @param   pcSgBuf           S/G buffer to clone for data transfers; ignored for
 *                            flush and discard requests.
 * @param   pvAllocation      Opaque allocation freed together with the context.
 * @param   pfnIoCtxTransfer  Initial transfer worker callback.
 * @param   fFlags            Combination of VDIOCTX_FLAGS_*.
 */
DECLINLINE(void) vdIoCtxInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
                             uint64_t uOffset, size_t cbTransfer, PVDIMAGE pImageStart,
                             PCRTSGBUF pcSgBuf, void *pvAllocation,
                             PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
{
    pIoCtx->pDisk                 = pDisk;
    pIoCtx->enmTxDir              = enmTxDir;
    /* cbTransferLeft is only 32 bits wide; assert the size fits. */
    pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTransfer; Assert((uint32_t)cbTransfer == cbTransfer);
    pIoCtx->Req.Io.uOffset        = uOffset;
    pIoCtx->Req.Io.cbTransfer     = cbTransfer;
    pIoCtx->Req.Io.pImageStart    = pImageStart;
    pIoCtx->Req.Io.pImageCur      = pImageStart;
    pIoCtx->Req.Io.cbBufClear     = 0;
    pIoCtx->Req.Io.pImageParentOverride = NULL;
    /* Keep the original offset/size around for the read filter chain applied on completion. */
    pIoCtx->Req.Io.uOffsetXferOrig = uOffset;
    pIoCtx->Req.Io.cbXferOrig      = cbTransfer;
    pIoCtx->cDataTransfersPending  = 0;
    pIoCtx->cMetaTransfersPending  = 0;
    pIoCtx->fComplete              = false;
    pIoCtx->fFlags                 = fFlags;
    pIoCtx->pvAllocation           = pvAllocation;
    pIoCtx->pfnIoCtxTransfer       = pfnIoCtxTransfer;
    pIoCtx->pfnIoCtxTransferNext   = NULL;
    pIoCtx->rcReq                  = VINF_SUCCESS;
    pIoCtx->pIoCtxParent           = NULL;

    /* There is no S/G list for a flush request. */
    if (   enmTxDir != VDIOCTXTXDIR_FLUSH
        && enmTxDir != VDIOCTXTXDIR_DISCARD)
        RTSgBufClone(&pIoCtx->Req.Io.SgBuf, pcSgBuf);
    else
        memset(&pIoCtx->Req.Io.SgBuf, 0, sizeof(RTSGBUF));
}
1112
1113/**
1114 * Internal: Tries to read the desired range from the given cache.
1115 *
1116 * @returns VBox status code.
1117 * @retval VERR_VD_BLOCK_FREE if the block is not in the cache.
1118 * pcbRead will be set to the number of bytes not in the cache.
1119 * Everything thereafter might be in the cache.
1120 * @param pCache The cache to read from.
1121 * @param uOffset Offset of the virtual disk to read.
1122 * @param cbRead How much to read.
1123 * @param pIoCtx The I/O context to read into.
1124 * @param pcbRead Where to store the number of bytes actually read.
1125 * On success this indicates the number of bytes read from the cache.
1126 * If VERR_VD_BLOCK_FREE is returned this gives the number of bytes
1127 * which are not in the cache.
1128 * In both cases everything beyond this value
1129 * might or might not be in the cache.
1130 */
1131static int vdCacheReadHelper(PVDCACHE pCache, uint64_t uOffset,
1132 size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbRead)
1133{
1134 int rc = VINF_SUCCESS;
1135
1136 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbRead=%zu pcbRead=%#p\n",
1137 pCache, uOffset, pIoCtx, cbRead, pcbRead));
1138
1139 AssertPtr(pCache);
1140 AssertPtr(pcbRead);
1141
1142 rc = pCache->Backend->pfnRead(pCache->pBackendData, uOffset, cbRead,
1143 pIoCtx, pcbRead);
1144
1145 LogFlowFunc(("returns rc=%Rrc pcbRead=%zu\n", rc, *pcbRead));
1146 return rc;
1147}
1148
/**
 * Internal: Writes data for the given block into the cache.
 *
 * When the caller does not ask for the written byte count, the write is
 * retried until the whole range is written or a hard error occurs.
 *
 * @returns VBox status code.
 * @param   pCache      The cache to write to.
 * @param   uOffset     Offset of the virtual disk to write to the cache.
 * @param   cbWrite     How much to write.
 * @param   pIoCtx      The I/O context to write from.
 * @param   pcbWritten  How much data could be written, optional.
 */
static int vdCacheWriteHelper(PVDCACHE pCache, uint64_t uOffset, size_t cbWrite,
                              PVDIOCTX pIoCtx, size_t *pcbWritten)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbWrite=%zu pcbWritten=%#p\n",
                 pCache, uOffset, pIoCtx, cbWrite, pcbWritten));

    AssertPtr(pCache);
    AssertPtr(pIoCtx);
    Assert(cbWrite > 0);

    if (pcbWritten)
        rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
                                       pIoCtx, pcbWritten);
    else
    {
        size_t cbWritten = 0;

        /* Loop until everything is written; partial writes advance the offset.
         * VERR_VD_ASYNC_IO_IN_PROGRESS is treated as "keep going" as well. */
        do
        {
            rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
                                           pIoCtx, &cbWritten);
            uOffset += cbWritten;
            cbWrite -= cbWritten;
        } while (   cbWrite
                 && (   RT_SUCCESS(rc)
                     || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
    }

    LogFlowFunc(("returns rc=%Rrc pcbWritten=%zu\n",
                 rc, pcbWritten ? *pcbWritten : cbWrite));
    return rc;
}
1193
1194/**
1195 * Creates a new empty discard state.
1196 *
1197 * @returns Pointer to the new discard state or NULL if out of memory.
1198 */
1199static PVDDISCARDSTATE vdDiscardStateCreate(void)
1200{
1201 PVDDISCARDSTATE pDiscard = (PVDDISCARDSTATE)RTMemAllocZ(sizeof(VDDISCARDSTATE));
1202
1203 if (pDiscard)
1204 {
1205 RTListInit(&pDiscard->ListLru);
1206 pDiscard->pTreeBlocks = (PAVLRU64TREE)RTMemAllocZ(sizeof(AVLRU64TREE));
1207 if (!pDiscard->pTreeBlocks)
1208 {
1209 RTMemFree(pDiscard);
1210 pDiscard = NULL;
1211 }
1212 }
1213
1214 return pDiscard;
1215}
1216
/**
 * Removes the least recently used blocks from the waiting list until
 * the new value is reached.
 *
 * @returns VBox status code.
 * @param   pDisk              VD disk container.
 * @param   pDiscard           The discard state.
 * @param   cbDiscardingNew    How many bytes should be waiting on success.
 *                             The number of bytes waiting can be less.
 */
static int vdDiscardRemoveBlocks(PVBOXHDD pDisk, PVDDISCARDSTATE pDiscard, size_t cbDiscardingNew)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
                 pDisk, pDiscard, cbDiscardingNew));

    /* Evict LRU blocks until the amount of waiting data drops to the target. */
    while (pDiscard->cbDiscarding > cbDiscardingNew)
    {
        PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);

        Assert(!RTListIsEmpty(&pDiscard->ListLru));

        /* Go over the allocation bitmap and mark all discarded sectors as unused. */
        uint64_t offStart = pBlock->Core.Key;
        uint32_t idxStart = 0;
        size_t cbLeft = pBlock->cbDiscard;
        bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
        uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);

        /* Walk the bitmap in runs of equal allocation state; one bit covers 512 bytes. */
        while (cbLeft > 0)
        {
            int32_t idxEnd;
            size_t cbThis = cbLeft;

            if (fAllocated)
            {
                /* Check for the first unallocated bit. */
                idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                {
                    cbThis = (idxEnd - idxStart) * 512;
                    fAllocated = false;
                }
            }
            else
            {
                /* Mark as unused and check for the first set bit. */
                idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                    cbThis = (idxEnd - idxStart) * 512;


                /* Issue a synchronous discard to the topmost image for this unallocated run. */
                VDIOCTX IoCtx;
                vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_DISCARD, 0, 0, NULL,
                            NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
                rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData,
                                                       &IoCtx, offStart, cbThis, NULL,
                                                       NULL, &cbThis, NULL,
                                                       VD_DISCARD_MARK_UNUSED);
                if (RT_FAILURE(rc))
                    break;

                fAllocated = true;
            }

            idxStart = idxEnd;
            offStart += cbThis;
            cbLeft   -= cbThis;
        }

        if (RT_FAILURE(rc))
            break;

        /* Fully processed: unlink the block from the AVL tree and the LRU list, then free it. */
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
        RTListNodeRemove(&pBlock->NodeLru);

        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
    }

    Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
1305
1306/**
1307 * Destroys the current discard state, writing any waiting blocks to the image.
1308 *
1309 * @returns VBox status code.
1310 * @param pDisk VD disk container.
1311 */
1312static int vdDiscardStateDestroy(PVBOXHDD pDisk)
1313{
1314 int rc = VINF_SUCCESS;
1315
1316 if (pDisk->pDiscard)
1317 {
1318 rc = vdDiscardRemoveBlocks(pDisk, pDisk->pDiscard, 0 /* Remove all blocks. */);
1319 AssertRC(rc);
1320 RTMemFree(pDisk->pDiscard->pTreeBlocks);
1321 RTMemFree(pDisk->pDiscard);
1322 pDisk->pDiscard = NULL;
1323 }
1324
1325 return rc;
1326}
1327
/**
 * Marks the given range as allocated in the image.
 * Required if there are discards in progress and a write to a block which can get discarded
 * is written to.
 *
 * @returns VBox status code.
 * @param   pDisk      VD container data.
 * @param   uOffset    First byte to mark as allocated.
 * @param   cbRange    Number of bytes to mark as allocated.
 */
static int vdDiscardSetRangeAllocated(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRange)
{
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    int rc = VINF_SUCCESS;

    if (pDiscard)
    {
        do
        {
            size_t cbThisRange = cbRange;
            PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64RangeGet(pDiscard->pTreeBlocks, uOffset);

            if (pBlock)
            {
                /* The current offset lies inside a tracked discard block:
                 * flip the covered bits in its allocation bitmap (one bit per 512 bytes). */
                int32_t idxStart, idxEnd;

                Assert(!(cbThisRange % 512));
                Assert(!((uOffset - pBlock->Core.Key) % 512));

                cbThisRange = RT_MIN(cbThisRange, pBlock->Core.KeyLast - uOffset + 1);

                idxStart = (uOffset - pBlock->Core.Key) / 512;
                idxEnd = idxStart + (int32_t)(cbThisRange / 512);
                ASMBitSetRange(pBlock->pbmAllocated, idxStart, idxEnd);
            }
            else
            {
                /* No block covers uOffset; skip ahead to the next tracked block (if any). */
                pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, uOffset, true);
                if (pBlock)
                    cbThisRange = RT_MIN(cbThisRange, pBlock->Core.Key - uOffset);
            }

            Assert(cbRange >= cbThisRange);

            uOffset += cbThisRange;
            cbRange -= cbThisRange;
        } while (cbRange != 0);
    }

    return rc;
}
1379
1380DECLINLINE(PVDIOCTX) vdIoCtxAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1381 uint64_t uOffset, size_t cbTransfer,
1382 PVDIMAGE pImageStart,PCRTSGBUF pcSgBuf,
1383 void *pvAllocation, PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1384 uint32_t fFlags)
1385{
1386 PVDIOCTX pIoCtx = NULL;
1387
1388 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1389 if (RT_LIKELY(pIoCtx))
1390 {
1391 vdIoCtxInit(pIoCtx, pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1392 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1393 }
1394
1395 return pIoCtx;
1396}
1397
1398DECLINLINE(PVDIOCTX) vdIoCtxRootAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1399 uint64_t uOffset, size_t cbTransfer,
1400 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1401 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1402 void *pvUser1, void *pvUser2,
1403 void *pvAllocation,
1404 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1405 uint32_t fFlags)
1406{
1407 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1408 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1409
1410 if (RT_LIKELY(pIoCtx))
1411 {
1412 pIoCtx->pIoCtxParent = NULL;
1413 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1414 pIoCtx->Type.Root.pvUser1 = pvUser1;
1415 pIoCtx->Type.Root.pvUser2 = pvUser2;
1416 }
1417
1418 LogFlow(("Allocated root I/O context %#p\n", pIoCtx));
1419 return pIoCtx;
1420}
1421
/**
 * Initializes an I/O context for a discard request.
 *
 * Discard contexts are always roots: the parent pointer is cleared and the
 * completion callback data is stored directly.
 *
 * @param   pIoCtx            The I/O context to initialize.
 * @param   pDisk             The disk container.
 * @param   paRanges          Array of ranges to discard (not copied, must stay valid).
 * @param   cRanges           Number of entries in paRanges.
 * @param   pfnComplete       Completion callback.
 * @param   pvUser1           First opaque user argument for the callback.
 * @param   pvUser2           Second opaque user argument for the callback.
 * @param   pvAllocation      Opaque allocation freed together with the context.
 * @param   pfnIoCtxTransfer  Initial transfer worker callback.
 * @param   fFlags            Combination of VDIOCTX_FLAGS_*.
 */
DECLINLINE(void) vdIoCtxDiscardInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, PCRTRANGE paRanges,
                                    unsigned cRanges, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
                                    void *pvUser1, void *pvUser2, void *pvAllocation,
                                    PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
{
    pIoCtx->pIoCtxNext                = NULL;
    pIoCtx->pDisk                     = pDisk;
    pIoCtx->enmTxDir                  = VDIOCTXTXDIR_DISCARD;
    pIoCtx->cDataTransfersPending     = 0;
    pIoCtx->cMetaTransfersPending     = 0;
    pIoCtx->fComplete                 = false;
    pIoCtx->fFlags                    = fFlags;
    pIoCtx->pvAllocation              = pvAllocation;
    pIoCtx->pfnIoCtxTransfer          = pfnIoCtxTransfer;
    pIoCtx->pfnIoCtxTransferNext      = NULL;
    pIoCtx->rcReq                     = VINF_SUCCESS;
    pIoCtx->Req.Discard.paRanges      = paRanges;
    pIoCtx->Req.Discard.cRanges       = cRanges;
    pIoCtx->Req.Discard.idxRange      = 0;
    pIoCtx->Req.Discard.cbDiscardLeft = 0;
    pIoCtx->Req.Discard.offCur        = 0;
    pIoCtx->Req.Discard.cbThisDiscard = 0;

    pIoCtx->pIoCtxParent          = NULL;
    pIoCtx->Type.Root.pfnComplete = pfnComplete;
    pIoCtx->Type.Root.pvUser1     = pvUser1;
    pIoCtx->Type.Root.pvUser2     = pvUser2;
}
1450
1451DECLINLINE(PVDIOCTX) vdIoCtxDiscardAlloc(PVBOXHDD pDisk, PCRTRANGE paRanges,
1452 unsigned cRanges,
1453 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1454 void *pvUser1, void *pvUser2,
1455 void *pvAllocation,
1456 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1457 uint32_t fFlags)
1458{
1459 PVDIOCTX pIoCtx = NULL;
1460
1461 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1462 if (RT_LIKELY(pIoCtx))
1463 {
1464 vdIoCtxDiscardInit(pIoCtx, pDisk, paRanges, cRanges, pfnComplete, pvUser1,
1465 pvUser2, pvAllocation, pfnIoCtxTransfer, fFlags);
1466 }
1467
1468 LogFlow(("Allocated discard I/O context %#p\n", pIoCtx));
1469 return pIoCtx;
1470}
1471
/**
 * Allocates a child I/O context linked to the given parent.
 *
 * The child inherits the parent's flags except VDIOCTX_FLAGS_DONT_FREE, and
 * saves the offset/size so it can be reset for a retry (see vdIoCtxChildReset).
 * Only one level of nesting is supported (asserted below).
 *
 * @returns Pointer to the new child I/O context, NULL if out of memory.
 */
DECLINLINE(PVDIOCTX) vdIoCtxChildAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
                                       uint64_t uOffset, size_t cbTransfer,
                                       PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
                                       PVDIOCTX pIoCtxParent, size_t cbTransferParent,
                                       size_t cbWriteParent, void *pvAllocation,
                                       PFNVDIOCTXTRANSFER pfnIoCtxTransfer)
{
    PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
                                   pcSgBuf, pvAllocation, pfnIoCtxTransfer, pIoCtxParent->fFlags & ~VDIOCTX_FLAGS_DONT_FREE);

    AssertPtr(pIoCtxParent);
    Assert(!pIoCtxParent->pIoCtxParent);

    if (RT_LIKELY(pIoCtx))
    {
        pIoCtx->pIoCtxParent = pIoCtxParent;
        pIoCtx->Type.Child.uOffsetSaved = uOffset;
        pIoCtx->Type.Child.cbTransferLeftSaved = cbTransfer;
        pIoCtx->Type.Child.cbTransferParent = cbTransferParent;
        pIoCtx->Type.Child.cbWriteParent = cbWriteParent;
    }

    LogFlow(("Allocated child I/O context %#p\n", pIoCtx));
    return pIoCtx;
}
1497
1498DECLINLINE(PVDIOTASK) vdIoTaskUserAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDIOCTX pIoCtx, uint32_t cbTransfer)
1499{
1500 PVDIOTASK pIoTask = NULL;
1501
1502 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1503 if (pIoTask)
1504 {
1505 pIoTask->pIoStorage = pIoStorage;
1506 pIoTask->pfnComplete = pfnComplete;
1507 pIoTask->pvUser = pvUser;
1508 pIoTask->fMeta = false;
1509 pIoTask->Type.User.cbTransfer = cbTransfer;
1510 pIoTask->Type.User.pIoCtx = pIoCtx;
1511 }
1512
1513 return pIoTask;
1514}
1515
1516DECLINLINE(PVDIOTASK) vdIoTaskMetaAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDMETAXFER pMetaXfer)
1517{
1518 PVDIOTASK pIoTask = NULL;
1519
1520 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1521 if (pIoTask)
1522 {
1523 pIoTask->pIoStorage = pIoStorage;
1524 pIoTask->pfnComplete = pfnComplete;
1525 pIoTask->pvUser = pvUser;
1526 pIoTask->fMeta = true;
1527 pIoTask->Type.Meta.pMetaXfer = pMetaXfer;
1528 }
1529
1530 return pIoTask;
1531}
1532
/**
 * Frees an I/O context unless it is marked VDIOCTX_FLAGS_DONT_FREE
 * (e.g. stack allocated synchronous contexts).
 *
 * @param   pDisk    The disk container owning the memory cache.
 * @param   pIoCtx   The I/O context to free.
 */
DECLINLINE(void) vdIoCtxFree(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    Log(("Freeing I/O context %#p\n", pIoCtx));

    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_FREE))
    {
        if (pIoCtx->pvAllocation)
            RTMemFree(pIoCtx->pvAllocation);
#ifdef DEBUG
        /* Poison the disk pointer to make use-after-free bugs fail loudly. */
        memset(&pIoCtx->pDisk, 0xff, sizeof(void *));
#endif
        RTMemCacheFree(pDisk->hMemCacheIoCtx, pIoCtx);
    }
}
1547
/**
 * Returns an I/O task to the disk's memory cache,
 * poisoning it first in debug builds to catch use-after-free.
 *
 * @param   pDisk     The disk container owning the memory cache.
 * @param   pIoTask   The I/O task to free.
 */
DECLINLINE(void) vdIoTaskFree(PVBOXHDD pDisk, PVDIOTASK pIoTask)
{
#ifdef DEBUG
    memset(pIoTask, 0xff, sizeof(VDIOTASK));
#endif
    RTMemCacheFree(pDisk->hMemCacheIoTask, pIoTask);
}
1555
/**
 * Resets a child I/O context to the offset/size saved at allocation time,
 * rewinding the S/G buffer so the transfer can be restarted.
 *
 * @param   pIoCtx   The child I/O context (must have a parent).
 */
DECLINLINE(void) vdIoCtxChildReset(PVDIOCTX pIoCtx)
{
    AssertPtr(pIoCtx->pIoCtxParent);

    RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    pIoCtx->Req.Io.uOffset = pIoCtx->Type.Child.uOffsetSaved;
    /* cbTransferLeft is only 32 bits; assert the saved value fits. */
    pIoCtx->Req.Io.cbTransferLeft = (uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved;
    Assert((uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved == pIoCtx->Type.Child.cbTransferLeftSaved);
}
1565
/**
 * Allocates a metadata transfer structure covering the given file range.
 *
 * The payload is allocated inline via the abData flexible array member; the
 * AVL range keys span [uOffset, uOffset + cb - 1].
 *
 * @returns Pointer to the new metadata transfer, NULL if out of memory.
 * @param   pIoStorage   Storage handle the transfer belongs to.
 * @param   uOffset      Start offset of the metadata in the file.
 * @param   cb           Size of the metadata in bytes.
 */
DECLINLINE(PVDMETAXFER) vdMetaXferAlloc(PVDIOSTORAGE pIoStorage, uint64_t uOffset, size_t cb)
{
    PVDMETAXFER pMetaXfer = (PVDMETAXFER)RTMemAlloc(RT_OFFSETOF(VDMETAXFER, abData[cb]));

    if (RT_LIKELY(pMetaXfer))
    {
        pMetaXfer->Core.Key     = uOffset;
        pMetaXfer->Core.KeyLast = uOffset + cb - 1;
        pMetaXfer->fFlags       = VDMETAXFER_TXDIR_NONE;
        pMetaXfer->cbMeta       = cb;
        pMetaXfer->pIoStorage   = pIoStorage;
        pMetaXfer->cRefs        = 0;
        pMetaXfer->pbDataShw    = NULL;
        RTListInit(&pMetaXfer->ListIoCtxWaiting);
        RTListInit(&pMetaXfer->ListIoCtxShwWrites);
    }
    return pMetaXfer;
}
1584
/**
 * Pushes an I/O context onto the given singly linked waiting list
 * using a lock-free compare-and-swap loop.
 *
 * The list grows at the head, so consumers see contexts in LIFO order and
 * must reverse the list for FIFO processing.
 *
 * @param   ppList   Head pointer of the waiting list (accessed atomically).
 * @param   pIoCtx   The I/O context to enqueue.
 */
DECLINLINE(void) vdIoCtxAddToWaitingList(volatile PVDIOCTX *ppList, PVDIOCTX pIoCtx)
{
    /* Put it on the waiting list. */
    PVDIOCTX pNext = ASMAtomicUoReadPtrT(ppList, PVDIOCTX);
    PVDIOCTX pHeadOld;
    pIoCtx->pIoCtxNext = pNext;
    /* Retry until the head we link against is still current when we swap. */
    while (!ASMAtomicCmpXchgExPtr(ppList, pIoCtx, pNext, &pHeadOld))
    {
        pNext = pHeadOld;
        Assert(pNext != pIoCtx);
        pIoCtx->pIoCtxNext = pNext;
        ASMNopPause();
    }
}
1599
/**
 * Marks a root I/O context as blocked and parks it on the disk's
 * blocked-context list for later processing.
 *
 * @param   pDisk    The disk container.
 * @param   pIoCtx   The I/O context to defer (must not already be blocked or a child).
 */
DECLINLINE(void) vdIoCtxDefer(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    LogFlowFunc(("Deferring I/O context pIoCtx=%#p\n", pIoCtx));

    Assert(!pIoCtx->pIoCtxParent && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED));
    pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxBlockedHead, pIoCtx);
}
1608
/** Copies up to cbData bytes between the S/G buffers of two I/O contexts; returns bytes copied. */
static size_t vdIoCtxCopy(PVDIOCTX pIoCtxDst, PVDIOCTX pIoCtxSrc, size_t cbData)
{
    return RTSgBufCopy(&pIoCtxDst->Req.Io.SgBuf, &pIoCtxSrc->Req.Io.SgBuf, cbData);
}
1613
#if 0 /* unused */
/** Compares up to cbData bytes of the S/G buffers of two I/O contexts (memcmp semantics). */
static int vdIoCtxCmp(PVDIOCTX pIoCtx1, PVDIOCTX pIoCtx2, size_t cbData)
{
    return RTSgBufCmp(&pIoCtx1->Req.Io.SgBuf, &pIoCtx2->Req.Io.SgBuf, cbData);
}
#endif
1620
/** Copies up to cbData bytes from a flat buffer into the I/O context's S/G buffer; returns bytes copied. */
static size_t vdIoCtxCopyTo(PVDIOCTX pIoCtx, const uint8_t *pbData, size_t cbData)
{
    return RTSgBufCopyFromBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
}
1625
/** Copies up to cbData bytes from the I/O context's S/G buffer into a flat buffer; returns bytes copied. */
static size_t vdIoCtxCopyFrom(PVDIOCTX pIoCtx, uint8_t *pbData, size_t cbData)
{
    return RTSgBufCopyToBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
}
1630
/** Fills up to cbData bytes of the I/O context's S/G buffer with the byte ch; returns bytes set. */
static size_t vdIoCtxSet(PVDIOCTX pIoCtx, uint8_t ch, size_t cbData)
{
    return RTSgBufSet(&pIoCtx->Req.Io.SgBuf, ch, cbData);
}
1635
1636/**
1637 * Returns whether the given I/O context has completed.
1638 *
1639 * @returns Flag whether the I/O context is complete.
1640 * @param pIoCtx The I/O context to check.
1641 */
1642DECLINLINE(bool) vdIoCtxIsComplete(PVDIOCTX pIoCtx)
1643{
1644 if ( !pIoCtx->cMetaTransfersPending
1645 && !pIoCtx->cDataTransfersPending
1646 && !pIoCtx->pfnIoCtxTransfer)
1647 return true;
1648
1649 /*
1650 * We complete the I/O context in case of an error
1651 * if there is no I/O task pending.
1652 */
1653 if ( RT_FAILURE(pIoCtx->rcReq)
1654 && !pIoCtx->cMetaTransfersPending
1655 && !pIoCtx->cDataTransfersPending)
1656 return true;
1657
1658 return false;
1659}
1660
1661/**
1662 * Returns whether the given I/O context is blocked due to a metadata transfer
1663 * or because the backend blocked it.
1664 *
1665 * @returns Flag whether the I/O context is blocked.
1666 * @param pIoCtx The I/O context to check.
1667 */
1668DECLINLINE(bool) vdIoCtxIsBlocked(PVDIOCTX pIoCtx)
1669{
1670 /* Don't change anything if there is a metadata transfer pending or we are blocked. */
1671 if ( pIoCtx->cMetaTransfersPending
1672 || (pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1673 return true;
1674
1675 return false;
1676}
1677
/**
 * Process the I/O context, core method which assumes that the I/O context
 * acquired the lock.
 *
 * @returns VBox status code.
 * @retval  VINF_VD_ASYNC_IO_FINISHED if the context finished processing.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if more transfers are pending.
 * @param   pIoCtx    I/O context to process.
 */
static int vdIoCtxProcessLocked(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pIoCtx->pDisk);

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    if (!vdIoCtxIsComplete(pIoCtx))
    {
        if (!vdIoCtxIsBlocked(pIoCtx))
        {
            if (pIoCtx->pfnIoCtxTransfer)
            {
                /* Call the transfer function advancing to the next while there is no error. */
                while (   pIoCtx->pfnIoCtxTransfer
                       && !pIoCtx->cMetaTransfersPending
                       && RT_SUCCESS(rc))
                {
                    LogFlowFunc(("calling transfer function %#p\n", pIoCtx->pfnIoCtxTransfer));
                    rc = pIoCtx->pfnIoCtxTransfer(pIoCtx);

                    /* Advance to the next part of the transfer if the current one succeeded. */
                    if (RT_SUCCESS(rc))
                    {
                        pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
                        pIoCtx->pfnIoCtxTransferNext = NULL;
                    }
                }
            }

            /* Map the internal state onto the finished/in-progress status codes. */
            if (   RT_SUCCESS(rc)
                && !pIoCtx->cMetaTransfersPending
                && !pIoCtx->cDataTransfersPending
                && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
                rc = VINF_VD_ASYNC_IO_FINISHED;
            else if (   RT_SUCCESS(rc)
                     || rc == VERR_VD_NOT_ENOUGH_METADATA
                     || rc == VERR_VD_IOCTX_HALT)
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
            else if (   RT_FAILURE(rc)
                     && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
            {
                /* Record the first error only; later successes must not overwrite it. */
                ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rc, VINF_SUCCESS);

                /*
                 * The I/O context completed if we have an error and there is no data
                 * or meta data transfer pending.
                 */
                if (   !pIoCtx->cMetaTransfersPending
                    && !pIoCtx->cDataTransfersPending)
                    rc = VINF_VD_ASYNC_IO_FINISHED;
                else
                    rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
            }
        }
        else
            rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }
    else
        rc = VINF_VD_ASYNC_IO_FINISHED;

    LogFlowFunc(("pIoCtx=%#p rc=%Rrc cDataTransfersPending=%u cMetaTransfersPending=%u fComplete=%RTbool\n",
                 pIoCtx, rc, pIoCtx->cDataTransfersPending, pIoCtx->cMetaTransfersPending,
                 pIoCtx->fComplete));

    return rc;
}
1753
/**
 * Processes the list of waiting I/O contexts.
 *
 * @returns VBox status code, only valid if pIoCtxRc is not NULL, treat as void
 *          function otherwise.
 * @param   pDisk      The disk structure.
 * @param   pIoCtxRc   An I/O context handle which waits on the list. When processed
 *                     The status code is returned. NULL if there is no I/O context
 *                     to return the status code for.
 */
static int vdDiskProcessWaitingIoCtx(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
{
    int rc = VERR_VD_ASYNC_IO_IN_PROGRESS;

    LogFlowFunc(("pDisk=%#p pIoCtxRc=%#p\n", pDisk, pIoCtxRc));

    VD_IS_LOCKED(pDisk);

    /* Get the waiting list and process it in FIFO order. */
    PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHead, NULL, PVDIOCTX);

    /* Reverse it (the lock-free push built the list in LIFO order). */
    PVDIOCTX pCur = pIoCtxHead;
    pIoCtxHead = NULL;
    while (pCur)
    {
        PVDIOCTX pInsert = pCur;
        pCur = pCur->pIoCtxNext;
        pInsert->pIoCtxNext = pIoCtxHead;
        pIoCtxHead = pInsert;
    }

    /* Process now. */
    pCur = pIoCtxHead;
    while (pCur)
    {
        int rcTmp;
        PVDIOCTX pTmp = pCur;

        pCur = pCur->pIoCtxNext;
        pTmp->pIoCtxNext = NULL;

        /*
         * Need to clear the sync flag here if there is a new I/O context
         * with it set and the context is not given in pIoCtxRc.
         * This happens most likely on a different thread and that one shouldn't
         * process the context synchronously.
         *
         * The thread who issued the context will wait on the event semaphore
         * anyway which is signalled when the completion handler is called.
         */
        if (   pTmp->fFlags & VDIOCTX_FLAGS_SYNC
            && pTmp != pIoCtxRc)
            pTmp->fFlags &= ~VDIOCTX_FLAGS_SYNC;

        rcTmp = vdIoCtxProcessLocked(pTmp);
        if (pTmp == pIoCtxRc)
        {
            /* Apply the read filter chain before handing the status back to the caller. */
            if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                && RT_SUCCESS(pTmp->rcReq)
                && pTmp->enmTxDir == VDIOCTXTXDIR_READ)
            {
                int rc2 = vdFilterChainApplyRead(pDisk, pTmp->Req.Io.uOffsetXferOrig,
                                                 pTmp->Req.Io.cbXferOrig, pTmp);
                if (RT_FAILURE(rc2))
                    rcTmp = rc2;
            }

            /* The given I/O context was processed, pass the return code to the caller. */
            if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                && (pTmp->fFlags & VDIOCTX_FLAGS_SYNC))
                rc = pTmp->rcReq;
            else
                rc = rcTmp;
        }
        else if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
        {
            LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
            vdThreadFinishWrite(pDisk);
            vdIoCtxRootComplete(pDisk, pTmp);
            vdIoCtxFree(pDisk, pTmp);
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
1842
/**
 * Processes the list of blocked I/O contexts.
 *
 * @returns nothing.
 * @param   pDisk    The disk structure.
 */
static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk)
{
    LogFlowFunc(("pDisk=%#p\n", pDisk));

    VD_IS_LOCKED(pDisk);

    /* Get the waiting list and process it in FIFO order. */
    PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxBlockedHead, NULL, PVDIOCTX);

    /* Reverse it (the lock-free push built the list in LIFO order). */
    PVDIOCTX pCur = pIoCtxHead;
    pIoCtxHead = NULL;
    while (pCur)
    {
        PVDIOCTX pInsert = pCur;
        pCur = pCur->pIoCtxNext;
        pInsert->pIoCtxNext = pIoCtxHead;
        pIoCtxHead = pInsert;
    }

    /* Process now. */
    pCur = pIoCtxHead;
    while (pCur)
    {
        int rc;
        PVDIOCTX pTmp = pCur;

        pCur = pCur->pIoCtxNext;
        pTmp->pIoCtxNext = NULL;

        /* Only blocked root contexts may be parked here; unblock before processing. */
        Assert(!pTmp->pIoCtxParent);
        Assert(pTmp->fFlags & VDIOCTX_FLAGS_BLOCKED);
        pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;

        rc = vdIoCtxProcessLocked(pTmp);
        if (   rc == VINF_VD_ASYNC_IO_FINISHED
            && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
        {
            LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
            vdThreadFinishWrite(pDisk);
            vdIoCtxRootComplete(pDisk, pTmp);
            vdIoCtxFree(pDisk, pTmp);
        }
    }

    LogFlowFunc(("returns\n"));
}
1896
/**
 * Processes the I/O context trying to lock the criticial section.
 * The context is deferred if the critical section is busy.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to process.
 */
static int vdIoCtxProcessTryLockDefer(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    Log(("Defer pIoCtx=%#p\n", pIoCtx));

    /* Put it on the waiting list first. */
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxHead, pIoCtx);

    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Leave it again, the context will be processed just before leaving the lock. */
        LogFlowFunc(("Successfully acquired the lock\n"));
        rc = vdDiskUnlock(pDisk, pIoCtx);
    }
    else
    {
        /* Another thread holds the lock; it will process the waiting list on unlock. */
        LogFlowFunc(("Lock is held\n"));
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }

    return rc;
}
1928
/**
 * Process the I/O context in a synchronous manner, waiting
 * for it to complete.
 *
 * @returns VBox status code of the completed request.
 * @param   pIoCtx           The sync I/O context.
 * @param   hEventComplete   Event sempahore to wait on for completion.
 */
static int vdIoCtxProcessSync(PVDIOCTX pIoCtx, RTSEMEVENT hEventComplete)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    LogFlowFunc(("pIoCtx=%p\n", pIoCtx));

    /* NOTE(review): this asserts that at least one of the two flags is set;
     * presumably sync contexts carry both SYNC and DONT_FREE — confirm intent. */
    AssertMsg(pIoCtx->fFlags & (VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE),
              ("I/O context is not marked as synchronous\n"));

    rc = vdIoCtxProcessTryLockDefer(pIoCtx);
    if (rc == VINF_VD_ASYNC_IO_FINISHED)
        rc = VINF_SUCCESS;

    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    {
        /* Another thread processes the context; block until it signals completion. */
        rc = RTSemEventWait(hEventComplete, RT_INDEFINITE_WAIT);
        AssertRC(rc);
    }

    rc = pIoCtx->rcReq;
    /* No-op for stack allocated contexts carrying VDIOCTX_FLAGS_DONT_FREE. */
    vdIoCtxFree(pDisk, pIoCtx);

    return rc;
}
1962
/** Returns whether the given I/O context currently owns the disk growing lock. */
DECLINLINE(bool) vdIoCtxIsDiskLockOwner(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    return pDisk->pIoCtxLockOwner == pIoCtx;
}
1967
/**
 * Tries to acquire the disk growing lock for the given I/O context.
 *
 * On contention the context is deferred to the blocked list instead.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the lock is held by another context.
 * @param   pDisk    The disk container.
 * @param   pIoCtx   The I/O context requesting the lock.
 */
static int vdIoCtxLockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    LogFlowFunc(("pDisk=%#p pIoCtx=%#p\n", pDisk, pIoCtx));

    if (!ASMAtomicCmpXchgPtr(&pDisk->pIoCtxLockOwner, pIoCtx, NIL_VDIOCTX))
    {
        Assert(pDisk->pIoCtxLockOwner != pIoCtx); /* No nesting allowed. */
        vdIoCtxDefer(pDisk, pIoCtx);
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }

    LogFlowFunc(("returns -> %Rrc\n", rc));
    return rc;
}
1986
/**
 * Releases the disk growing lock held by the given I/O context and optionally
 * resumes contexts that were blocked waiting for it.
 *
 * @param   pDisk                The disk container.
 * @param   pIoCtx               The I/O context releasing the lock (must be the owner).
 * @param   fProcessBlockedReqs  Whether to process the blocked-context list afterwards.
 */
static void vdIoCtxUnlockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx, bool fProcessBlockedReqs)
{
    RT_NOREF1(pIoCtx);
    LogFlowFunc(("pDisk=%#p pIoCtx=%#p fProcessBlockedReqs=%RTbool\n",
                 pDisk, pIoCtx, fProcessBlockedReqs));

    VD_IS_LOCKED(pDisk);

    LogFlow(("Unlocking disk lock owner is %#p\n", pDisk->pIoCtxLockOwner));
    Assert(pDisk->pIoCtxLockOwner == pIoCtx);
    ASMAtomicXchgPtrT(&pDisk->pIoCtxLockOwner, NIL_VDIOCTX, PVDIOCTX);

    if (fProcessBlockedReqs)
    {
        /* Process any blocked writes if the current request didn't caused another growing. */
        vdDiskProcessBlockedIoCtx(pDisk);
    }

    LogFlowFunc(("returns\n"));
}
2007
/**
 * Internal: Reads a given amount of data from the image chain of the disk.
 *
 * @returns VBox status code; VERR_VD_BLOCK_FREE if no image in the chain has
 *          the block allocated.
 * @param   pDisk                 The disk container (unused).
 * @param   pImage                Image to start reading from.
 * @param   pImageParentOverride  Optional parent chain override used instead of
 *                                pImage->pPrev when descending the chain.
 * @param   uOffset               Offset to read from.
 * @param   cbRead                Number of bytes to read.
 * @param   pIoCtx                The I/O context to read into.
 * @param   pcbThisRead           Where to store the number of bytes covered by
 *                                this read (also set on VERR_VD_BLOCK_FREE).
 **/
static int vdDiskReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
                            uint64_t uOffset, size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbThisRead)
{
    RT_NOREF1(pDisk);
    int rc = VINF_SUCCESS;
    size_t cbThisRead = cbRead;

    AssertPtr(pcbThisRead);

    *pcbThisRead = 0;

    /*
     * Try to read from the given image.
     * If the block is not allocated read from override chain if present.
     */
    rc = pImage->Backend->pfnRead(pImage->pBackendData,
                                  uOffset, cbThisRead, pIoCtx,
                                  &cbThisRead);

    if (rc == VERR_VD_BLOCK_FREE)
    {
        /* Descend the parent chain until some image has the block allocated. */
        for (PVDIMAGE pCurrImage = pImageParentOverride ? pImageParentOverride : pImage->pPrev;
             pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
             pCurrImage = pCurrImage->pPrev)
        {
            rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
                                              uOffset, cbThisRead, pIoCtx,
                                              &cbThisRead);
        }
    }

    if (RT_SUCCESS(rc) || rc == VERR_VD_BLOCK_FREE)
        *pcbThisRead = cbThisRead;

    return rc;
}
2047
/**
 * internal: read the specified amount of data in whatever blocks the backend
 * will give us - async version.
 *
 * I/O context transfer callback.  Reads Req.Io.cbTransfer bytes starting at
 * Req.Io.uOffset, walking down the image chain for unallocated blocks.  May
 * return VERR_VD_ASYNC_IO_IN_PROGRESS when the request was deferred because
 * it interferes with a pending block allocation.
 */
static DECLCALLBACK(int) vdReadHelperAsync(PVDIOCTX pIoCtx)
{
    int rc;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    size_t cbToRead = pIoCtx->Req.Io.cbTransfer;
    uint64_t uOffset = pIoCtx->Req.Io.uOffset;
    PVDIMAGE pCurrImage = pIoCtx->Req.Io.pImageCur;
    PVDIMAGE pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
    unsigned cImagesRead = pIoCtx->Req.Io.cImagesRead;
    size_t cbThisRead;

    /*
     * Check whether there is a full block write in progress which was not allocated.
     * Defer I/O if the range interferes but only if it does not belong to the
     * write doing the allocation.
     */
    if (   pDisk->pIoCtxLockOwner != NIL_VDIOCTX
        && uOffset >= pDisk->uOffsetStartLocked
        && uOffset < pDisk->uOffsetEndLocked
        && (   !pIoCtx->pIoCtxParent
            || pIoCtx->pIoCtxParent != pDisk->pIoCtxLockOwner))
    {
        Log(("Interferring read while allocating a new block => deferring read\n"));
        vdIoCtxDefer(pDisk, pIoCtx);
        return VERR_VD_ASYNC_IO_IN_PROGRESS;
    }

    /* Loop until all reads started or we have a backend which needs to read metadata. */
    do
    {
        /* Search for image with allocated block. Do not attempt to read more
         * than the previous reads marked as valid. Otherwise this would return
         * stale data when different block sizes are used for the images. */
        cbThisRead = cbToRead;

        if (   pDisk->pCache
            && !pImageParentOverride)
        {
            /* Try the attached cache first; fall back to the image chain on a miss. */
            rc = vdCacheReadHelper(pDisk->pCache, uOffset, cbThisRead,
                                   pIoCtx, &cbThisRead);
            if (rc == VERR_VD_BLOCK_FREE)
            {
                rc = vdDiskReadHelper(pDisk, pCurrImage, NULL, uOffset, cbThisRead,
                                      pIoCtx, &cbThisRead);

                /* If the read was successful, write the data back into the cache. */
                if (   RT_SUCCESS(rc)
                    && pIoCtx->fFlags & VDIOCTX_FLAGS_READ_UPDATE_CACHE)
                {
                    rc = vdCacheWriteHelper(pDisk->pCache, uOffset, cbThisRead,
                                            pIoCtx, NULL);
                }
            }
        }
        else
        {
            /*
             * Try to read from the given image.
             * If the block is not allocated read from override chain if present.
             */
            rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
                                              uOffset, cbThisRead, pIoCtx,
                                              &cbThisRead);

            if (   rc == VERR_VD_BLOCK_FREE
                && cImagesRead != 1)
            {
                /* cImagesRead == 0 disables the cut off, otherwise stop after
                 * the given number of images was consulted. */
                unsigned cImagesToProcess = cImagesRead;

                pCurrImage = pImageParentOverride ? pImageParentOverride : pCurrImage->pPrev;
                pIoCtx->Req.Io.pImageParentOverride = NULL;

                while (pCurrImage && rc == VERR_VD_BLOCK_FREE)
                {
                    rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
                                                      uOffset, cbThisRead,
                                                      pIoCtx, &cbThisRead);
                    if (cImagesToProcess == 1)
                        break;
                    else if (cImagesToProcess > 0)
                        cImagesToProcess--;

                    if (rc == VERR_VD_BLOCK_FREE)
                        pCurrImage = pCurrImage->pPrev;
                }
            }
        }

        /* The task state will be updated on success already, don't do it here!. */
        if (rc == VERR_VD_BLOCK_FREE)
        {
            /* No image in the chain contains the data for the block. */
            ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisRead); Assert(cbThisRead == (uint32_t)cbThisRead);

            /* Fill the free space with 0 if we are told to do so
             * or a previous read returned valid data. */
            if (pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS)
                vdIoCtxSet(pIoCtx, '\0', cbThisRead);
            else
                pIoCtx->Req.Io.cbBufClear += cbThisRead;

            if (pIoCtx->Req.Io.pImageCur->uOpenFlags & VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS)
                rc = VINF_VD_NEW_ZEROED_BLOCK;
            else
                rc = VINF_SUCCESS;
        }
        else if (rc == VERR_VD_IOCTX_HALT)
        {
            /* Halted by the context owning the disk lock; remember progress and block. */
            uOffset += cbThisRead;
            cbToRead -= cbThisRead;
            pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
        }
        else if (   RT_SUCCESS(rc)
                 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            /* First not free block, fill the space before with 0. */
            if (   pIoCtx->Req.Io.cbBufClear
                && !(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
            {
                RTSGBUF SgBuf;
                RTSgBufClone(&SgBuf, &pIoCtx->Req.Io.SgBuf);
                RTSgBufReset(&SgBuf);
                RTSgBufSet(&SgBuf, 0, pIoCtx->Req.Io.cbBufClear);
                pIoCtx->Req.Io.cbBufClear = 0;
                pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
            }
            rc = VINF_SUCCESS;
        }

        if (RT_FAILURE(rc))
            break;

        cbToRead -= cbThisRead;
        uOffset += cbThisRead;
        pCurrImage = pIoCtx->Req.Io.pImageStart; /* Start with the highest image in the chain. */
    } while (cbToRead != 0 && RT_SUCCESS(rc));

    if (   rc == VERR_VD_NOT_ENOUGH_METADATA
        || rc == VERR_VD_IOCTX_HALT)
    {
        /* Save the current state so the read can be resumed later. */
        pIoCtx->Req.Io.uOffset = uOffset;
        pIoCtx->Req.Io.cbTransfer = cbToRead;
        pIoCtx->Req.Io.pImageCur = pCurrImage ? pCurrImage : pIoCtx->Req.Io.pImageStart;
    }

    return (!(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
           ? VERR_VD_BLOCK_FREE
           : rc;
}
2202
/**
 * internal: parent image read wrapper for compacting.
 *
 * @returns VBox status code.
 * @param   pvUser   Pointer to a VDPARENTSTATEDESC giving disk and image.
 * @param   uOffset  Offset in the disk to start reading from.
 * @param   pvBuf    Where to store the read data.
 * @param   cbRead   Number of bytes to read.
 */
static DECLCALLBACK(int) vdParentRead(void *pvUser, uint64_t uOffset, void *pvBuf,
                                      size_t cbRead)
{
    PVDPARENTSTATEDESC pParentState = (PVDPARENTSTATEDESC)pvUser;

    /** @todo
     * Only used for compaction so far which is not possible to mix with async I/O.
     * Needs to be changed if we want to support online compaction of images.
     */
    /* Take the disk lock; any concurrent holder means the protocol was violated. */
    bool fLocked = ASMAtomicXchgBool(&pParentState->pDisk->fLocked, true);
    AssertMsgReturn(!fLocked,
                    ("Calling synchronous parent read while another thread holds the disk lock\n"),
                    VERR_VD_INVALID_STATE);

    /* Fake an I/O context. */
    RTSGSEG Segment;
    RTSGBUF SgBuf;
    VDIOCTX IoCtx;

    Segment.pvSeg = pvBuf;
    Segment.cbSeg = cbRead;
    RTSgBufInit(&SgBuf, &Segment, 1);
    /* Synchronous context; free blocks read back as zeroes. */
    vdIoCtxInit(&IoCtx, pParentState->pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pParentState->pImage,
                &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
    int rc = vdReadHelperAsync(&IoCtx);
    /* Release the disk lock again. */
    ASMAtomicXchgBool(&pParentState->pDisk->fLocked, false);
    return rc;
}
2234
/**
 * Extended version of vdReadHelper(), implementing certain optimizations
 * for image cloning.
 *
 * Synchronous helper: sets up a stack-based I/O context driven by
 * vdReadHelperAsync() and waits on an event semaphore for completion.
 *
 * @returns VBox status code.
 * @param   pDisk                 The disk to read from.
 * @param   pImage                The image to start reading from.
 * @param   pImageParentOverride  The parent image to read from
 *                                if the starting image returns a free block.
 *                                If NULL is passed the real parent of the image
 *                                in the chain is used.
 * @param   uOffset               Offset in the disk to start reading from.
 * @param   pvBuf                 Where to store the read data.
 * @param   cbRead                How much to read.
 * @param   fZeroFreeBlocks       Flag whether free blocks should be zeroed.
 *                                If false and no image has data for specified
 *                                range VERR_VD_BLOCK_FREE is returned.
 *                                Note that unallocated blocks are still zeroed
 *                                if at least one image has valid data for a part
 *                                of the range.
 * @param   fUpdateCache          Flag whether to update the attached cache if
 *                                available.
 * @param   cImagesRead           Number of images in the chain to read until
 *                                the read is cut off. A value of 0 disables the cut off.
 */
static int vdReadHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
                          uint64_t uOffset, void *pvBuf, size_t cbRead,
                          bool fZeroFreeBlocks, bool fUpdateCache, unsigned cImagesRead)
{
    int rc = VINF_SUCCESS;
    /* DONT_FREE: the context lives on this stack, not in the memory cache. */
    uint32_t fFlags = VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
    RTSGSEG Segment;
    RTSGBUF SgBuf;
    VDIOCTX IoCtx;
    RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;

    rc = RTSemEventCreate(&hEventComplete);
    if (RT_FAILURE(rc))
        return rc;

    if (fZeroFreeBlocks)
        fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
    if (fUpdateCache)
        fFlags |= VDIOCTX_FLAGS_READ_UPDATE_CACHE;

    Segment.pvSeg = pvBuf;
    Segment.cbSeg = cbRead;
    RTSgBufInit(&SgBuf, &Segment, 1);
    vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pImage, &SgBuf,
                NULL, vdReadHelperAsync, fFlags);

    IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
    IoCtx.Req.Io.cImagesRead = cImagesRead;
    IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
    IoCtx.Type.Root.pvUser1 = pDisk;
    IoCtx.Type.Root.pvUser2 = hEventComplete;
    rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
    RTSemEventDestroy(hEventComplete);
    return rc;
}
2295
2296/**
2297 * internal: read the specified amount of data in whatever blocks the backend
2298 * will give us.
2299 */
2300static int vdReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2301 void *pvBuf, size_t cbRead, bool fUpdateCache)
2302{
2303 return vdReadHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbRead,
2304 true /* fZeroFreeBlocks */, fUpdateCache, 0);
2305}
2306
2307/**
2308 * internal: mark the disk as not modified.
2309 */
2310static void vdResetModifiedFlag(PVBOXHDD pDisk)
2311{
2312 if (pDisk->uModified & VD_IMAGE_MODIFIED_FLAG)
2313 {
2314 /* generate new last-modified uuid */
2315 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2316 {
2317 RTUUID Uuid;
2318
2319 RTUuidCreate(&Uuid);
2320 pDisk->pLast->Backend->pfnSetModificationUuid(pDisk->pLast->pBackendData,
2321 &Uuid);
2322
2323 if (pDisk->pCache)
2324 pDisk->pCache->Backend->pfnSetModificationUuid(pDisk->pCache->pBackendData,
2325 &Uuid);
2326 }
2327
2328 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FLAG;
2329 }
2330}
2331
2332/**
2333 * internal: mark the disk as modified.
2334 */
2335static void vdSetModifiedFlag(PVBOXHDD pDisk)
2336{
2337 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2338 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2339 {
2340 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2341
2342 /* First modify, so create a UUID and ensure it's written to disk. */
2343 vdResetModifiedFlag(pDisk);
2344
2345 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2346 {
2347 VDIOCTX IoCtx;
2348 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, NULL,
2349 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2350 pDisk->pLast->Backend->pfnFlush(pDisk->pLast->pBackendData, &IoCtx);
2351 }
2352 }
2353}
2354
2355/**
2356 * internal: write buffer to the image, taking care of block boundaries and
2357 * write optimizations.
2358 */
2359static int vdWriteHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage,
2360 PVDIMAGE pImageParentOverride, uint64_t uOffset,
2361 const void *pvBuf, size_t cbWrite,
2362 uint32_t fFlags, unsigned cImagesRead)
2363{
2364 int rc = VINF_SUCCESS;
2365 RTSGSEG Segment;
2366 RTSGBUF SgBuf;
2367 VDIOCTX IoCtx;
2368 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2369
2370 rc = RTSemEventCreate(&hEventComplete);
2371 if (RT_FAILURE(rc))
2372 return rc;
2373
2374 fFlags |= VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2375
2376 Segment.pvSeg = (void *)pvBuf;
2377 Segment.cbSeg = cbWrite;
2378 RTSgBufInit(&SgBuf, &Segment, 1);
2379 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_WRITE, uOffset, cbWrite, pImage, &SgBuf,
2380 NULL, vdWriteHelperAsync, fFlags);
2381
2382 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2383 IoCtx.Req.Io.cImagesRead = cImagesRead;
2384 IoCtx.pIoCtxParent = NULL;
2385 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2386 IoCtx.Type.Root.pvUser1 = pDisk;
2387 IoCtx.Type.Root.pvUser2 = hEventComplete;
2388 if (RT_SUCCESS(rc))
2389 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2390
2391 RTSemEventDestroy(hEventComplete);
2392 return rc;
2393}
2394
2395/**
2396 * internal: write buffer to the image, taking care of block boundaries and
2397 * write optimizations.
2398 */
2399static int vdWriteHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2400 const void *pvBuf, size_t cbWrite, uint32_t fFlags)
2401{
2402 return vdWriteHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbWrite,
2403 fFlags, 0);
2404}
2405
2406/**
2407 * Internal: Copies the content of one disk to another one applying optimizations
2408 * to speed up the copy process if possible.
2409 */
2410static int vdCopyHelper(PVBOXHDD pDiskFrom, PVDIMAGE pImageFrom, PVBOXHDD pDiskTo,
2411 uint64_t cbSize, unsigned cImagesFromRead, unsigned cImagesToRead,
2412 bool fSuppressRedundantIo, PVDINTERFACEPROGRESS pIfProgress,
2413 PVDINTERFACEPROGRESS pDstIfProgress)
2414{
2415 int rc = VINF_SUCCESS;
2416 int rc2;
2417 uint64_t uOffset = 0;
2418 uint64_t cbRemaining = cbSize;
2419 void *pvBuf = NULL;
2420 bool fLockReadFrom = false;
2421 bool fLockWriteTo = false;
2422 bool fBlockwiseCopy = false;
2423 unsigned uProgressOld = 0;
2424
2425 LogFlowFunc(("pDiskFrom=%#p pImageFrom=%#p pDiskTo=%#p cbSize=%llu cImagesFromRead=%u cImagesToRead=%u fSuppressRedundantIo=%RTbool pIfProgress=%#p pDstIfProgress=%#p\n",
2426 pDiskFrom, pImageFrom, pDiskTo, cbSize, cImagesFromRead, cImagesToRead, fSuppressRedundantIo, pDstIfProgress, pDstIfProgress));
2427
2428 if ( (fSuppressRedundantIo || (cImagesFromRead > 0))
2429 && RTListIsEmpty(&pDiskFrom->ListFilterChainRead))
2430 fBlockwiseCopy = true;
2431
2432 /* Allocate tmp buffer. */
2433 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
2434 if (!pvBuf)
2435 return rc;
2436
2437 do
2438 {
2439 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
2440
2441 /* Note that we don't attempt to synchronize cross-disk accesses.
2442 * It wouldn't be very difficult to do, just the lock order would
2443 * need to be defined somehow to prevent deadlocks. Postpone such
2444 * magic as there is no use case for this. */
2445
2446 rc2 = vdThreadStartRead(pDiskFrom);
2447 AssertRC(rc2);
2448 fLockReadFrom = true;
2449
2450 if (fBlockwiseCopy)
2451 {
2452 RTSGSEG SegmentBuf;
2453 RTSGBUF SgBuf;
2454 VDIOCTX IoCtx;
2455
2456 SegmentBuf.pvSeg = pvBuf;
2457 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
2458 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
2459 vdIoCtxInit(&IoCtx, pDiskFrom, VDIOCTXTXDIR_READ, 0, 0, NULL,
2460 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2461
2462 /* Read the source data. */
2463 rc = pImageFrom->Backend->pfnRead(pImageFrom->pBackendData,
2464 uOffset, cbThisRead, &IoCtx,
2465 &cbThisRead);
2466
2467 if ( rc == VERR_VD_BLOCK_FREE
2468 && cImagesFromRead != 1)
2469 {
2470 unsigned cImagesToProcess = cImagesFromRead;
2471
2472 for (PVDIMAGE pCurrImage = pImageFrom->pPrev;
2473 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2474 pCurrImage = pCurrImage->pPrev)
2475 {
2476 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2477 uOffset, cbThisRead,
2478 &IoCtx, &cbThisRead);
2479 if (cImagesToProcess == 1)
2480 break;
2481 else if (cImagesToProcess > 0)
2482 cImagesToProcess--;
2483 }
2484 }
2485 }
2486 else
2487 rc = vdReadHelper(pDiskFrom, pImageFrom, uOffset, pvBuf, cbThisRead,
2488 false /* fUpdateCache */);
2489
2490 if (RT_FAILURE(rc) && rc != VERR_VD_BLOCK_FREE)
2491 break;
2492
2493 rc2 = vdThreadFinishRead(pDiskFrom);
2494 AssertRC(rc2);
2495 fLockReadFrom = false;
2496
2497 if (rc != VERR_VD_BLOCK_FREE)
2498 {
2499 rc2 = vdThreadStartWrite(pDiskTo);
2500 AssertRC(rc2);
2501 fLockWriteTo = true;
2502
2503 /* Only do collapsed I/O if we are copying the data blockwise. */
2504 rc = vdWriteHelperEx(pDiskTo, pDiskTo->pLast, NULL, uOffset, pvBuf,
2505 cbThisRead, VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG /* fFlags */,
2506 fBlockwiseCopy ? cImagesToRead : 0);
2507 if (RT_FAILURE(rc))
2508 break;
2509
2510 rc2 = vdThreadFinishWrite(pDiskTo);
2511 AssertRC(rc2);
2512 fLockWriteTo = false;
2513 }
2514 else /* Don't propagate the error to the outside */
2515 rc = VINF_SUCCESS;
2516
2517 uOffset += cbThisRead;
2518 cbRemaining -= cbThisRead;
2519
2520 unsigned uProgressNew = uOffset * 99 / cbSize;
2521 if (uProgressNew != uProgressOld)
2522 {
2523 uProgressOld = uProgressNew;
2524
2525 if (pIfProgress && pIfProgress->pfnProgress)
2526 {
2527 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
2528 uProgressOld);
2529 if (RT_FAILURE(rc))
2530 break;
2531 }
2532 if (pDstIfProgress && pDstIfProgress->pfnProgress)
2533 {
2534 rc = pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser,
2535 uProgressOld);
2536 if (RT_FAILURE(rc))
2537 break;
2538 }
2539 }
2540 } while (uOffset < cbSize);
2541
2542 RTMemFree(pvBuf);
2543
2544 if (fLockReadFrom)
2545 {
2546 rc2 = vdThreadFinishRead(pDiskFrom);
2547 AssertRC(rc2);
2548 }
2549
2550 if (fLockWriteTo)
2551 {
2552 rc2 = vdThreadFinishWrite(pDiskTo);
2553 AssertRC(rc2);
2554 }
2555
2556 LogFlowFunc(("returns rc=%Rrc\n", rc));
2557 return rc;
2558}
2559
2560/**
2561 * Flush helper async version.
2562 */
2563static DECLCALLBACK(int) vdSetModifiedHelperAsync(PVDIOCTX pIoCtx)
2564{
2565 int rc = VINF_SUCCESS;
2566 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2567
2568 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
2569 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2570 rc = VINF_SUCCESS;
2571
2572 return rc;
2573}
2574
/**
 * internal: mark the disk as modified - async version.
 *
 * Sets the modified flag and, on the first modification, creates a new
 * modification UUID and flushes it to disk through a child flush context.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the flush is still pending;
 *          the given context is blocked until it completes.
 * @param   pDisk   The disk container.
 * @param   pIoCtx  The I/O context performing the modification.
 */
static int vdSetModifiedFlagAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
    if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
    {
        rc = vdIoCtxLockDisk(pDisk, pIoCtx);
        if (RT_SUCCESS(rc))
        {
            pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;

            /* First modify, so create a UUID and ensure it's written to disk. */
            vdResetModifiedFlag(pDisk);

            if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
            {
                /* Flush the last image through a child context so completion
                 * is tracked against the causing request. */
                PVDIOCTX pIoCtxFlush = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_FLUSH,
                                                         0, 0, pDisk->pLast,
                                                         NULL, pIoCtx, 0, 0, NULL,
                                                         vdSetModifiedHelperAsync);

                if (pIoCtxFlush)
                {
                    rc = vdIoCtxProcessLocked(pIoCtxFlush);
                    if (rc == VINF_VD_ASYNC_IO_FINISHED)
                    {
                        vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessBlockedReqs */);
                        vdIoCtxFree(pDisk, pIoCtxFlush);
                    }
                    else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                    {
                        /* Flush still running; block the parent until it completes. */
                        ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
                        pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
                    }
                    else /* Another error */
                        vdIoCtxFree(pDisk, pIoCtxFlush);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
        }
    }

    return rc;
}
2626
/**
 * Commits the fully assembled block (pre-read data + caller data + post-read
 * data) to the image as one complete block write.
 *
 * I/O context transfer callback; final stage of the write helper chains.
 */
static DECLCALLBACK(int) vdWriteHelperCommitAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVDIMAGE pImage = pIoCtx->Req.Io.pImageStart;
    size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
    /* The write starts cbPreRead bytes before the saved offset and spans the
     * whole block. */
    rc = pImage->Backend->pfnWrite(pImage->pBackendData,
                                   pIoCtx->Req.Io.uOffset - cbPreRead,
                                   cbPreRead + cbThisWrite + cbPostRead,
                                   pIoCtx, NULL, &cbPreRead, &cbPostRead, 0);
    Assert(rc != VERR_VD_BLOCK_FREE);
    Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPreRead == 0);
    Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPostRead == 0);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        rc = VINF_SUCCESS;
    else if (rc == VERR_VD_IOCTX_HALT)
    {
        /* Halted by the lock owner; mark blocked and resume later. */
        pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
        rc = VINF_SUCCESS;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
2654
2655static DECLCALLBACK(int) vdWriteHelperOptimizedCmpAndWriteAsync(PVDIOCTX pIoCtx)
2656{
2657 int rc = VINF_SUCCESS;
2658 size_t cbThisWrite = 0;
2659 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2660 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2661 size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
2662 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2663 size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
2664 PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;
2665
2666 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2667
2668 AssertPtr(pIoCtxParent);
2669 Assert(!pIoCtxParent->pIoCtxParent);
2670 Assert(!pIoCtx->Req.Io.cbTransferLeft && !pIoCtx->cMetaTransfersPending);
2671
2672 vdIoCtxChildReset(pIoCtx);
2673 cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2674 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2675
2676 /* Check if the write would modify anything in this block. */
2677 if (!RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &pIoCtxParent->Req.Io.SgBuf, cbThisWrite))
2678 {
2679 RTSGBUF SgBufSrcTmp;
2680
2681 RTSgBufClone(&SgBufSrcTmp, &pIoCtxParent->Req.Io.SgBuf);
2682 RTSgBufAdvance(&SgBufSrcTmp, cbThisWrite);
2683 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbThisWrite);
2684
2685 if (!cbWriteCopy || !RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &SgBufSrcTmp, cbWriteCopy))
2686 {
2687 /* Block is completely unchanged, so no need to write anything. */
2688 LogFlowFunc(("Block didn't changed\n"));
2689 ASMAtomicWriteU32(&pIoCtx->Req.Io.cbTransferLeft, 0);
2690 RTSgBufAdvance(&pIoCtxParent->Req.Io.SgBuf, cbThisWrite);
2691 return VINF_VD_ASYNC_IO_FINISHED;
2692 }
2693 }
2694
2695 /* Copy the data to the right place in the buffer. */
2696 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2697 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
2698 vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
2699
2700 /* Handle the data that goes after the write to fill the block. */
2701 if (cbPostRead)
2702 {
2703 /* Now assemble the remaining data. */
2704 if (cbWriteCopy)
2705 {
2706 /*
2707 * The S/G buffer of the parent needs to be cloned because
2708 * it is not allowed to modify the state.
2709 */
2710 RTSGBUF SgBufParentTmp;
2711
2712 RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
2713 RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
2714 }
2715
2716 /* Zero out the remainder of this block. Will never be visible, as this
2717 * is beyond the limit of the image. */
2718 if (cbFill)
2719 {
2720 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbReadImage);
2721 vdIoCtxSet(pIoCtx, '\0', cbFill);
2722 }
2723 }
2724
2725 /* Write the full block to the virtual disk. */
2726 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2727 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2728
2729 return rc;
2730}
2731
/**
 * Reads the complete block the optimized write touches so it can be compared
 * against the data about to be written.
 *
 * I/O context transfer callback; chains to
 * vdWriteHelperOptimizedCmpAndWriteAsync() once all reads completed.
 */
static DECLCALLBACK(int) vdWriteHelperOptimizedPreReadAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Blocks not allocated in any image must read back as zeroes for the compare. */
    pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;

    if (   pIoCtx->Req.Io.cbTransferLeft
        && !pIoCtx->cDataTransfersPending)
        rc = vdReadHelperAsync(pIoCtx);

    /* Keep waiting while transfers are outstanding, otherwise move to the compare stage. */
    if (   (   RT_SUCCESS(rc)
            || (rc == VERR_VD_ASYNC_IO_IN_PROGRESS))
        && (   pIoCtx->Req.Io.cbTransferLeft
            || pIoCtx->cMetaTransfersPending))
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    else
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedCmpAndWriteAsync;

    return rc;
}
2754
/**
 * internal: write a complete block (only used for diff images), taking the
 * remaining data from parent images. This implementation optimizes out writes
 * that do not change the data relative to the state as of the parent images.
 * All backends which support differential/growing images support this - async version.
 *
 * Computes the fill/copy/read sizes for the block and then schedules a read
 * of the whole block (vdWriteHelperOptimizedPreReadAsync) so the write can
 * be compared against the existing content.
 */
static DECLCALLBACK(int) vdWriteHelperOptimizedAsync(PVDIOCTX pIoCtx)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
    size_t cbFill = 0;
    size_t cbWriteCopy = 0;
    size_t cbReadImage = 0;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pIoCtx->pIoCtxParent);
    Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);

    if (cbPostRead)
    {
        /* Figure out how much we cannot read from the image, because
         * the last block to write might exceed the nominal size of the
         * image for technical reasons. */
        if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
            cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;

        /* If we have data to be written, use that instead of reading
         * data from the image. */
        if (cbWrite > cbThisWrite)
            cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);

        /* The rest must be read from the image. */
        cbReadImage = cbPostRead - cbWriteCopy - cbFill;
    }

    /* Remember the sizes for the compare-and-write stage. */
    pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
    pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
    pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;

    /* Read the entire data of the block so that we can compare whether it will
     * be modified by the write or not. */
    size_t cbTmp = cbPreRead + cbThisWrite + cbPostRead - cbFill; Assert(cbTmp == (uint32_t)cbTmp);
    pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTmp;
    pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
    pIoCtx->Req.Io.uOffset -= cbPreRead;

    /* Next step */
    pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedPreReadAsync;
    return VINF_SUCCESS;
}
2810
/**
 * Reads the remaining image data needed to complete the block of a standard
 * (non-optimized) write, then commits the assembled block.
 *
 * I/O context transfer callback; chains to vdWriteHelperCommitAsync() once
 * all outstanding reads completed.
 */
static DECLCALLBACK(int) vdWriteHelperStandardReadImageAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Blocks not allocated in any image read back as zeroes. */
    pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;

    if (   pIoCtx->Req.Io.cbTransferLeft
        && !pIoCtx->cDataTransfersPending)
        rc = vdReadHelperAsync(pIoCtx);

    if (   RT_SUCCESS(rc)
        && (   pIoCtx->Req.Io.cbTransferLeft
            || pIoCtx->cMetaTransfersPending))
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Wait for the reads to finish. */
    else
    {
        size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;

        /* Zero out the remainder of this block. Will never be visible, as this
         * is beyond the limit of the image. */
        if (cbFill)
            vdIoCtxSet(pIoCtx, '\0', cbFill);

        /* Write the full block to the virtual disk. */
        RTSgBufReset(&pIoCtx->Req.Io.SgBuf);

        vdIoCtxChildReset(pIoCtx);
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
    }

    return rc;
}
2845
/**
 * Assembles the complete block for a standard (non-optimized) write: caller
 * data plus data copied from the parent request, zero fill, and data read
 * from the image.
 *
 * I/O context transfer callback; chains either to
 * vdWriteHelperStandardReadImageAsync() when more data must be read, or
 * directly to vdWriteHelperCommitAsync().
 */
static DECLCALLBACK(int) vdWriteHelperStandardAssemble(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Copy the caller's data into the middle of the block. */
    vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
    if (cbPostRead)
    {
        size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
        size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
        size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;

        /* Now assemble the remaining data. */
        if (cbWriteCopy)
        {
            /*
             * The S/G buffer of the parent needs to be cloned because
             * it is not allowed to modify the state.
             */
            RTSGBUF SgBufParentTmp;

            RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
            RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
        }

        if (cbReadImage)
        {
            /* Read remaining data. */
            pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardReadImageAsync;

            /* Read the data that goes before the write to fill the block. */
            pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbReadImage; Assert(cbReadImage == (uint32_t)cbReadImage);
            pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
            pIoCtx->Req.Io.uOffset += cbWriteCopy;
        }
        else
        {
            /* Zero out the remainder of this block. Will never be visible, as this
             * is beyond the limit of the image. */
            if (cbFill)
                vdIoCtxSet(pIoCtx, '\0', cbFill);

            /* Write the full block to the virtual disk. */
            RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
            vdIoCtxChildReset(pIoCtx);
            pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
        }
    }
    else
    {
        /* Write the full block to the virtual disk. */
        RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
        vdIoCtxChildReset(pIoCtx);
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
    }

    return rc;
}
2908
/**
 * Reads the data preceding the write so the partial block can be completed.
 *
 * I/O context transfer callback; chains to vdWriteHelperStandardAssemble()
 * once all reads completed.
 */
static DECLCALLBACK(int) vdWriteHelperStandardPreReadAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Blocks not allocated in any image read back as zeroes. */
    pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;

    if (   pIoCtx->Req.Io.cbTransferLeft
        && !pIoCtx->cDataTransfersPending)
        rc = vdReadHelperAsync(pIoCtx);

    /* Keep waiting while transfers are outstanding, otherwise start assembling. */
    if (   RT_SUCCESS(rc)
        && (   pIoCtx->Req.Io.cbTransferLeft
            || pIoCtx->cMetaTransfersPending))
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    else
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;

    return rc;
}
2930
/**
 * Entry point for the standard (non-optimized) partial-block write: computes
 * the fill/copy/read sizes and schedules the pre-read or the assemble stage.
 *
 * I/O context transfer callback used for backends where the same-block
 * optimization is not wanted (VD_OPEN_FLAGS_HONOR_SAME).
 */
static DECLCALLBACK(int) vdWriteHelperStandardAsync(PVDIOCTX pIoCtx)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
    size_t cbFill = 0;
    size_t cbWriteCopy = 0;
    size_t cbReadImage = 0;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pIoCtx->pIoCtxParent);
    Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);

    /* Calculate the amount of data to read that goes after the write to fill the block. */
    if (cbPostRead)
    {
        /* If we have data to be written, use that instead of reading
         * data from the image. */
        if (cbWrite > cbThisWrite)
            cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
        else
            cbWriteCopy = 0;

        /* Figure out how much we cannot read from the image, because
         * the last block to write might exceed the nominal size of the
         * image for technical reasons. */
        if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
            cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;

        /* The rest must be read from the image. */
        cbReadImage = cbPostRead - cbWriteCopy - cbFill;
    }

    /* Remember the sizes for the assemble stage. */
    pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
    pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
    pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;

    /* Next step */
    if (cbPreRead)
    {
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardPreReadAsync;

        /* Read the data that goes before the write to fill the block. */
        pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbPreRead; Assert(cbPreRead == (uint32_t)cbPreRead);
        pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
        pIoCtx->Req.Io.uOffset -= cbPreRead;
    }
    else
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;

    return VINF_SUCCESS;
}
2987
2988/**
2989 * internal: write buffer to the image, taking care of block boundaries and
2990 * write optimizations - async version.
2991 */
2992static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx)
2993{
2994 int rc;
2995 size_t cbWrite = pIoCtx->Req.Io.cbTransfer;
2996 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2997 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2998 PVBOXHDD pDisk = pIoCtx->pDisk;
2999 unsigned fWrite;
3000 size_t cbThisWrite;
3001 size_t cbPreRead, cbPostRead;
3002
3003 /* Apply write filter chain here if it was not done already. */
3004 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_WRITE_FILTER_APPLIED))
3005 {
3006 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbWrite, pIoCtx);
3007 if (RT_FAILURE(rc))
3008 return rc;
3009 pIoCtx->fFlags |= VDIOCTX_FLAGS_WRITE_FILTER_APPLIED;
3010 }
3011
3012 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG))
3013 {
3014 rc = vdSetModifiedFlagAsync(pDisk, pIoCtx);
3015 if (RT_FAILURE(rc)) /* Includes I/O in progress. */
3016 return rc;
3017 }
3018
3019 rc = vdDiscardSetRangeAllocated(pDisk, uOffset, cbWrite);
3020 if (RT_FAILURE(rc))
3021 return rc;
3022
3023 /* Loop until all written. */
3024 do
3025 {
3026 /* Try to write the possibly partial block to the last opened image.
3027 * This works when the block is already allocated in this image or
3028 * if it is a full-block write (and allocation isn't suppressed below).
3029 * For image formats which don't support zero blocks, it's beneficial
3030 * to avoid unnecessarily allocating unchanged blocks. This prevents
3031 * unwanted expanding of images. VMDK is an example. */
3032 cbThisWrite = cbWrite;
3033
3034 /*
3035 * Check whether there is a full block write in progress which was not allocated.
3036 * Defer I/O if the range interferes.
3037 */
3038 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
3039 && uOffset >= pDisk->uOffsetStartLocked
3040 && uOffset < pDisk->uOffsetEndLocked)
3041 {
3042 Log(("Interferring write while allocating a new block => deferring write\n"));
3043 vdIoCtxDefer(pDisk, pIoCtx);
3044 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3045 break;
3046 }
3047
3048 fWrite = (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3049 ? 0 : VD_WRITE_NO_ALLOC;
3050 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset, cbThisWrite,
3051 pIoCtx, &cbThisWrite, &cbPreRead, &cbPostRead,
3052 fWrite);
3053 if (rc == VERR_VD_BLOCK_FREE)
3054 {
3055 /* Lock the disk .*/
3056 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
3057 if (RT_SUCCESS(rc))
3058 {
3059 /*
3060 * Allocate segment and buffer in one go.
3061 * A bit hackish but avoids the need to allocate memory twice.
3062 */
3063 PRTSGBUF pTmp = (PRTSGBUF)RTMemAlloc(cbPreRead + cbThisWrite + cbPostRead + sizeof(RTSGSEG) + sizeof(RTSGBUF));
3064 AssertBreakStmt(pTmp, rc = VERR_NO_MEMORY);
3065 PRTSGSEG pSeg = (PRTSGSEG)(pTmp + 1);
3066
3067 pSeg->pvSeg = pSeg + 1;
3068 pSeg->cbSeg = cbPreRead + cbThisWrite + cbPostRead;
3069 RTSgBufInit(pTmp, pSeg, 1);
3070
3071 PVDIOCTX pIoCtxWrite = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_WRITE,
3072 uOffset, pSeg->cbSeg, pImage,
3073 pTmp,
3074 pIoCtx, cbThisWrite,
3075 cbWrite,
3076 pTmp,
3077 (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
3078 ? vdWriteHelperStandardAsync
3079 : vdWriteHelperOptimizedAsync);
3080 if (!VALID_PTR(pIoCtxWrite))
3081 {
3082 RTMemTmpFree(pTmp);
3083 rc = VERR_NO_MEMORY;
3084 break;
3085 }
3086
3087 LogFlowFunc(("Disk is growing because of pIoCtx=%#p pIoCtxWrite=%#p\n",
3088 pIoCtx, pIoCtxWrite));
3089
3090 /* Save the current range for the growing operation to check for intersecting requests later. */
3091 pDisk->uOffsetStartLocked = uOffset - cbPreRead;
3092 pDisk->uOffsetEndLocked = uOffset + cbThisWrite + cbPostRead;
3093
3094 pIoCtxWrite->Type.Child.cbPreRead = cbPreRead;
3095 pIoCtxWrite->Type.Child.cbPostRead = cbPostRead;
3096 pIoCtxWrite->Req.Io.pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
3097
3098 /* Process the write request */
3099 rc = vdIoCtxProcessLocked(pIoCtxWrite);
3100
3101 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3102 {
3103 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessDeferredReqs*/ );
3104 vdIoCtxFree(pDisk, pIoCtxWrite);
3105 break;
3106 }
3107 else if ( rc == VINF_VD_ASYNC_IO_FINISHED
3108 && ASMAtomicCmpXchgBool(&pIoCtxWrite->fComplete, true, false))
3109 {
3110 LogFlow(("Child write request completed\n"));
3111 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbThisWrite);
3112 Assert(cbThisWrite == (uint32_t)cbThisWrite);
3113 rc = pIoCtxWrite->rcReq;
3114 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisWrite);
3115 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessDeferredReqs*/ );
3116 vdIoCtxFree(pDisk, pIoCtxWrite);
3117 }
3118 else
3119 {
3120 LogFlow(("Child write pending\n"));
3121 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
3122 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3123 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3124 cbWrite -= cbThisWrite;
3125 uOffset += cbThisWrite;
3126 break;
3127 }
3128 }
3129 else
3130 {
3131 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
3132 break;
3133 }
3134 }
3135
3136 if (rc == VERR_VD_IOCTX_HALT)
3137 {
3138 cbWrite -= cbThisWrite;
3139 uOffset += cbThisWrite;
3140 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
3141 break;
3142 }
3143 else if (rc == VERR_VD_NOT_ENOUGH_METADATA)
3144 break;
3145
3146 cbWrite -= cbThisWrite;
3147 uOffset += cbThisWrite;
3148 } while (cbWrite != 0 && (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
3149
3150 if ( rc == VERR_VD_ASYNC_IO_IN_PROGRESS
3151 || rc == VERR_VD_NOT_ENOUGH_METADATA
3152 || rc == VERR_VD_IOCTX_HALT)
3153 {
3154 /*
3155 * Tell the caller that we don't need to go back here because all
3156 * writes are initiated.
3157 */
3158 if ( !cbWrite
3159 && rc != VERR_VD_IOCTX_HALT)
3160 rc = VINF_SUCCESS;
3161
3162 pIoCtx->Req.Io.uOffset = uOffset;
3163 pIoCtx->Req.Io.cbTransfer = cbWrite;
3164 }
3165
3166 return rc;
3167}
3168
/**
 * Flush helper async version.
 *
 * Locks the disk (covering the whole offset range), clears the modified flag
 * and flushes the topmost image; if a cache is attached it is flushed as well.
 * The lock is released here unless async I/O is still outstanding or the
 * context was halted.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdFlushHelperAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;

    rc = vdIoCtxLockDisk(pDisk, pIoCtx);
    if (RT_SUCCESS(rc))
    {
        /* Mark the whole disk as locked. */
        pDisk->uOffsetStartLocked = 0;
        pDisk->uOffsetEndLocked = UINT64_C(0xffffffffffffffff);

        vdResetModifiedFlag(pDisk);
        rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
        /* Image flush succeeded or is still pending/halted: flush the cache too. */
        if (   (   RT_SUCCESS(rc)
                || rc == VERR_VD_ASYNC_IO_IN_PROGRESS
                || rc == VERR_VD_IOCTX_HALT)
            && pDisk->pCache)
        {
            rc = pDisk->pCache->Backend->pfnFlush(pDisk->pCache->pBackendData, pIoCtx);
            if (   RT_SUCCESS(rc)
                || (   rc != VERR_VD_ASYNC_IO_IN_PROGRESS
                    && rc != VERR_VD_IOCTX_HALT))
                vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
            else if (rc != VERR_VD_IOCTX_HALT) /* Async I/O still pending: keep the lock. */
                rc = VINF_SUCCESS;
        }
        else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            rc = VINF_SUCCESS;
        else if (rc != VERR_VD_IOCTX_HALT)/* Some other error. */
            vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
    }

    return rc;
}
3208
/**
 * Async discard helper - discards a whole block which is recorded in the block
 * tree.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardWholeBlockAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    PVDDISCARDBLOCK pBlock = pIoCtx->Req.Discard.pBlock;
    size_t cbPreAllocated, cbPostAllocated, cbActuallyDiscarded;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pBlock);

    /* The block is completely unallocated now, ask the topmost image to
     * discard its whole range in one go. */
    rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                           pBlock->Core.Key, pBlock->cbDiscard,
                                           &cbPreAllocated, &cbPostAllocated,
                                           &cbActuallyDiscarded, NULL, 0);
    /* A recorded block is expected to be aligned and fully discardable. */
    Assert(rc != VERR_VD_DISCARD_ALIGNMENT_NOT_MET);
    Assert(!cbPreAllocated);
    Assert(!cbPostAllocated);
    Assert(cbActuallyDiscarded == pBlock->cbDiscard || RT_FAILURE(rc));

    /* Remove the block on success. */
    if (   RT_SUCCESS(rc)
        || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    {
        /* Drop the block from the AVL tree, the LRU list and the accounting. */
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); RT_NOREF1(pBlockRemove);

        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTListNodeRemove(&pBlock->NodeLru);
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
        pIoCtx->Req.Discard.pBlock = NULL;/* Safety precaution. */
        pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
        rc = VINF_SUCCESS;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3256
/**
 * Removes the least recently used blocks from the waiting list until
 * the new value is reached - version for async I/O.
 *
 * @returns VBox status code.
 * @param   pDisk              VD disk container.
 * @param   pIoCtx             The I/O context associated with this discard operation.
 * @param   cbDiscardingNew    How many bytes should be waiting on success.
 *                             The number of bytes waiting can be less.
 */
static int vdDiscardRemoveBlocksAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx, size_t cbDiscardingNew)
{
    int rc = VINF_SUCCESS;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;

    LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
                 pDisk, pDiscard, cbDiscardingNew));

    /* Evict blocks from the tail of the LRU list until enough is drained. */
    while (pDiscard->cbDiscarding > cbDiscardingNew)
    {
        PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);

        Assert(!RTListIsEmpty(&pDiscard->ListLru));

        /* Go over the allocation bitmap and mark all discarded sectors as unused. */
        uint64_t offStart = pBlock->Core.Key;
        uint32_t idxStart = 0;
        size_t cbLeft = pBlock->cbDiscard;
        bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
        uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512); /* one bitmap bit per 512-byte sector */

        /* Walk the bitmap in runs of same-state (allocated/unallocated) sectors. */
        while (cbLeft > 0)
        {
            int32_t idxEnd;
            size_t cbThis = cbLeft;

            if (fAllocated)
            {
                /* Check for the first unallocated bit. */
                idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                {
                    cbThis = (idxEnd - idxStart) * 512;
                    fAllocated = false;
                }
            }
            else
            {
                /* Mark as unused and check for the first set bit. */
                idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                    cbThis = (idxEnd - idxStart) * 512;

                rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                                       offStart, cbThis, NULL, NULL, &cbThis,
                                                       NULL, VD_DISCARD_MARK_UNUSED);
                if (   RT_FAILURE(rc)
                    && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
                    break;

                fAllocated = true;
            }

            idxStart = idxEnd;
            offStart += cbThis;
            cbLeft -= cbThis;
        }

        if (   RT_FAILURE(rc)
            && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            break;

        /* Block fully processed: unlink from tree and LRU, release memory. */
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
        RTListNodeRemove(&pBlock->NodeLru);

        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
    }

    /* Pending async I/O counts as success for the caller. */
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        rc = VINF_SUCCESS;

    Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3346
/**
 * Async discard helper - discards the current range if there is no matching
 * block in the tree.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardCurrentRangeAsync(PVDIOCTX pIoCtx)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    uint64_t offStart = pIoCtx->Req.Discard.offCur;
    size_t cbThisDiscard = pIoCtx->Req.Discard.cbThisDiscard;
    void *pbmAllocated = NULL;
    size_t cbPreAllocated, cbPostAllocated;
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* No block found, try to discard using the backend first. */
    rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                           offStart, cbThisDiscard, &cbPreAllocated,
                                           &cbPostAllocated, &cbThisDiscard,
                                           &pbmAllocated, 0);
    if (rc == VERR_VD_DISCARD_ALIGNMENT_NOT_MET)
    {
        /* The range is not block-aligned for the backend: record it (together
         * with the allocation bitmap the backend returned) until neighbouring
         * discards make the whole block unallocated. */
        /* Create new discard block. */
        PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTMemAllocZ(sizeof(VDDISCARDBLOCK));
        if (pBlock)
        {
            pBlock->Core.Key = offStart - cbPreAllocated;
            pBlock->Core.KeyLast = offStart + cbThisDiscard + cbPostAllocated - 1;
            pBlock->cbDiscard = cbPreAllocated + cbThisDiscard + cbPostAllocated;
            pBlock->pbmAllocated = pbmAllocated;
            bool fInserted = RTAvlrU64Insert(pDiscard->pTreeBlocks, &pBlock->Core);
            Assert(fInserted); NOREF(fInserted);

            /* New blocks are most recently used. */
            RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
            pDiscard->cbDiscarding += pBlock->cbDiscard;

            Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
            pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
            pIoCtx->Req.Discard.offCur += cbThisDiscard;
            pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;

            /* Evict least recently used blocks if too much data is waiting. */
            if (pDiscard->cbDiscarding > VD_DISCARD_REMOVE_THRESHOLD)
                rc = vdDiscardRemoveBlocksAsync(pDisk, pIoCtx, VD_DISCARD_REMOVE_THRESHOLD);
            else
                rc = VINF_SUCCESS;

            if (RT_SUCCESS(rc))
                pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
        }
        else
        {
            RTMemFree(pbmAllocated);
            rc = VERR_NO_MEMORY;
        }
    }
    else if (   RT_SUCCESS(rc)
             || rc == VERR_VD_ASYNC_IO_IN_PROGRESS) /* Save state and advance to next range. */
    {
        Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
        pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
        pIoCtx->Req.Discard.offCur += cbThisDiscard;
        pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
        pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
        rc = VINF_SUCCESS;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3420
/**
 * Async discard helper - entry point.
 *
 * Processes the discard ranges stored in the I/O context one piece at a time,
 * dispatching either to vdDiscardCurrentRangeAsync() (no recorded block) or
 * vdDiscardWholeBlockAsync() (block became completely unallocated) and
 * re-scheduling itself for the next part.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PCRTRANGE paRanges = pIoCtx->Req.Discard.paRanges;
    unsigned cRanges = pIoCtx->Req.Discard.cRanges;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Check if the I/O context processed all ranges. */
    if (   pIoCtx->Req.Discard.idxRange == cRanges
        && !pIoCtx->Req.Discard.cbDiscardLeft)
    {
        LogFlowFunc(("All ranges discarded, completing\n"));
        vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs*/);
        return VINF_SUCCESS;
    }

    /* Take the disk lock unless this context already owns it (re-entry). */
    if (pDisk->pIoCtxLockOwner != pIoCtx)
        rc = vdIoCtxLockDisk(pDisk, pIoCtx);

    if (RT_SUCCESS(rc))
    {
        uint64_t offStart = pIoCtx->Req.Discard.offCur;
        size_t cbDiscardLeft = pIoCtx->Req.Discard.cbDiscardLeft;
        size_t cbThisDiscard;

        /* Publish the locked range so interfering writes get deferred. */
        pDisk->uOffsetStartLocked = offStart;
        pDisk->uOffsetEndLocked = offStart + cbDiscardLeft;

        if (RT_UNLIKELY(!pDiscard))
        {
            /* Lazily create the discard state on first use. */
            pDiscard = vdDiscardStateCreate();
            if (!pDiscard)
                return VERR_NO_MEMORY; /* NOTE(review): returns with the disk still locked - confirm intended. */

            pDisk->pDiscard = pDiscard;
        }

        if (!pIoCtx->Req.Discard.cbDiscardLeft)
        {
            /* Previous range exhausted: load the next range descriptor. */
            offStart = paRanges[pIoCtx->Req.Discard.idxRange].offStart;
            cbDiscardLeft = paRanges[pIoCtx->Req.Discard.idxRange].cbRange;
            LogFlowFunc(("New range descriptor loaded (%u) offStart=%llu cbDiscard=%zu\n",
                         pIoCtx->Req.Discard.idxRange, offStart, cbDiscardLeft));
            pIoCtx->Req.Discard.idxRange++;
        }

        /* Look for a matching block in the AVL tree first. */
        PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, false);
        if (!pBlock || pBlock->Core.KeyLast < offStart)
        {
            PVDDISCARDBLOCK pBlockAbove = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, true);

            /* Clip range to remain in the current block. */
            if (pBlockAbove)
                cbThisDiscard = RT_MIN(cbDiscardLeft, pBlockAbove->Core.KeyLast - offStart + 1);
            else
                cbThisDiscard = cbDiscardLeft;

            Assert(!(cbThisDiscard % 512));
            pIoCtx->Req.Discard.pBlock = NULL;
            pIoCtx->pfnIoCtxTransferNext = vdDiscardCurrentRangeAsync;
        }
        else
        {
            /* Range lies partly in the block, update allocation bitmap. */
            int32_t idxStart, idxEnd;

            cbThisDiscard = RT_MIN(cbDiscardLeft, pBlock->Core.KeyLast - offStart + 1);

            AssertPtr(pBlock);

            Assert(!(cbThisDiscard % 512));
            Assert(!((offStart - pBlock->Core.Key) % 512));

            /* One bitmap bit per 512-byte sector. */
            idxStart = (offStart - pBlock->Core.Key) / 512;
            idxEnd = idxStart + (int32_t)(cbThisDiscard / 512);

            ASMBitClearRange(pBlock->pbmAllocated, idxStart, idxEnd);

            cbDiscardLeft -= cbThisDiscard;
            offStart += cbThisDiscard;

            /* Call the backend to discard the block if it is completely unallocated now. */
            if (ASMBitFirstSet((volatile void *)pBlock->pbmAllocated, (uint32_t)(pBlock->cbDiscard / 512)) == -1)
            {
                pIoCtx->Req.Discard.pBlock = pBlock;
                pIoCtx->pfnIoCtxTransferNext = vdDiscardWholeBlockAsync;
                rc = VINF_SUCCESS;
            }
            else
            {
                /* Still partially allocated: move the block to the LRU front. */
                RTListNodeRemove(&pBlock->NodeLru);
                RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);

                /* Start with next range. */
                pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
                rc = VINF_SUCCESS;
            }
        }

        /* Save state in the context. */
        pIoCtx->Req.Discard.offCur = offStart;
        pIoCtx->Req.Discard.cbDiscardLeft = cbDiscardLeft;
        pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3539
3540#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3541
3542/**
3543 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterImage}
3544 */
3545static DECLCALLBACK(int) vdPluginRegisterImage(void *pvUser, PCVDIMAGEBACKEND pBackend)
3546{
3547 int rc = VINF_SUCCESS;
3548
3549 if (VD_VERSION_ARE_COMPATIBLE(VD_IMGBACKEND_VERSION, pBackend->u32Version))
3550 vdAddBackend((RTLDRMOD)pvUser, pBackend);
3551 else
3552 {
3553 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3554 rc = VERR_IGNORED;
3555 }
3556
3557 return rc;
3558}
3559
3560/**
3561 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterCache}
3562 */
3563static DECLCALLBACK(int) vdPluginRegisterCache(void *pvUser, PCVDCACHEBACKEND pBackend)
3564{
3565 int rc = VINF_SUCCESS;
3566
3567 if (VD_VERSION_ARE_COMPATIBLE(VD_CACHEBACKEND_VERSION, pBackend->u32Version))
3568 vdAddCacheBackend((RTLDRMOD)pvUser, pBackend);
3569 else
3570 {
3571 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3572 rc = VERR_IGNORED;
3573 }
3574
3575 return rc;
3576}
3577
3578/**
3579 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterFilter}
3580 */
3581static DECLCALLBACK(int) vdPluginRegisterFilter(void *pvUser, PCVDFILTERBACKEND pBackend)
3582{
3583 int rc = VINF_SUCCESS;
3584
3585 if (VD_VERSION_ARE_COMPATIBLE(VD_FLTBACKEND_VERSION, pBackend->u32Version))
3586 vdAddFilterBackend((RTLDRMOD)pvUser, pBackend);
3587 else
3588 {
3589 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3590 rc = VERR_IGNORED;
3591 }
3592
3593 return rc;
3594}
3595
3596/**
3597 * Checks whether the given plugin filename was already loaded.
3598 *
3599 * @returns Pointer to already loaded plugin, NULL if not found.
3600 * @param pszFilename The filename to check.
3601 */
3602static PVDPLUGIN vdPluginFind(const char *pszFilename)
3603{
3604 PVDPLUGIN pIt;
3605 RTListForEach(&g_ListPluginsLoaded, pIt, VDPLUGIN, NodePlugin)
3606 {
3607 if (!RTStrCmp(pIt->pszFilename, pszFilename))
3608 return pIt;
3609 }
3610
3611 return NULL;
3612}
3613
3614/**
3615 * Adds a plugin to the list of loaded plugins.
3616 *
3617 * @returns VBox status code.
3618 * @param hPlugin Plugin handle to add.
3619 * @param pszFilename The associated filename, used for finding duplicates.
3620 */
3621static int vdAddPlugin(RTLDRMOD hPlugin, const char *pszFilename)
3622{
3623 int rc = VINF_SUCCESS;
3624 PVDPLUGIN pPlugin = (PVDPLUGIN)RTMemAllocZ(sizeof(VDPLUGIN));
3625
3626 if (pPlugin)
3627 {
3628 pPlugin->hPlugin = hPlugin;
3629 pPlugin->pszFilename = RTStrDup(pszFilename);
3630 if (pPlugin->pszFilename)
3631 RTListAppend(&g_ListPluginsLoaded, &pPlugin->NodePlugin);
3632 else
3633 {
3634 RTMemFree(pPlugin);
3635 rc = VERR_NO_MEMORY;
3636 }
3637 }
3638 else
3639 rc = VERR_NO_MEMORY;
3640
3641 return rc;
3642}
3643
3644static int vdRemovePlugin(const char *pszFilename)
3645{
3646 /* Find plugin to be removed from the list. */
3647 PVDPLUGIN pIt = vdPluginFind(pszFilename);
3648 if (!pIt)
3649 return VINF_SUCCESS;
3650
3651 /** @todo r=klaus: need to add a plugin entry point for unregistering the
3652 * backends. Only if this doesn't exist (or fails to work) we should fall
3653 * back to the following uncoordinated backend cleanup. */
3654 for (unsigned i = 0; i < g_cBackends; i++)
3655 {
3656 while (i < g_cBackends && g_ahBackendPlugins[i] == pIt->hPlugin)
3657 {
3658 memcpy(&g_apBackends[i], &g_apBackends[i + 1], (g_cBackends - i - 1) * sizeof(PCVDIMAGEBACKEND));
3659 memcpy(&g_ahBackendPlugins[i], &g_ahBackendPlugins[i + 1], (g_cBackends - i - 1) * sizeof(RTLDRMOD));
3660 /** @todo for now skip reallocating, doesn't save much */
3661 g_cBackends--;
3662 }
3663 }
3664 for (unsigned i = 0; i < g_cCacheBackends; i++)
3665 {
3666 while (i < g_cCacheBackends && g_ahCacheBackendPlugins[i] == pIt->hPlugin)
3667 {
3668 memcpy(&g_apCacheBackends[i], &g_apCacheBackends[i + 1], (g_cCacheBackends - i - 1) * sizeof(PCVDCACHEBACKEND));
3669 memcpy(&g_ahCacheBackendPlugins[i], &g_ahCacheBackendPlugins[i + 1], (g_cCacheBackends - i - 1) * sizeof(RTLDRMOD));
3670 /** @todo for now skip reallocating, doesn't save much */
3671 g_cCacheBackends--;
3672 }
3673 }
3674 for (unsigned i = 0; i < g_cFilterBackends; i++)
3675 {
3676 while (i < g_cFilterBackends && g_pahFilterBackendPlugins[i] == pIt->hPlugin)
3677 {
3678 memcpy(&g_apFilterBackends[i], &g_apFilterBackends[i + 1], (g_cFilterBackends - i - 1) * sizeof(PCVDFILTERBACKEND));
3679 memcpy(&g_pahFilterBackendPlugins[i], &g_pahFilterBackendPlugins[i + 1], (g_cFilterBackends - i - 1) * sizeof(RTLDRMOD));
3680 /** @todo for now skip reallocating, doesn't save much */
3681 g_cFilterBackends--;
3682 }
3683 }
3684
3685 /* Remove the plugin node now, all traces of it are gone. */
3686 RTListNodeRemove(&pIt->NodePlugin);
3687 RTLdrClose(pIt->hPlugin);
3688 RTStrFree(pIt->pszFilename);
3689 RTMemFree(pIt);
3690
3691 return VINF_SUCCESS;
3692}
3693
3694#endif /* !VBOX_HDD_NO_DYNAMIC_BACKENDS */
3695
3696/**
3697 * Worker for VDPluginLoadFromFilename() and vdPluginLoadFromPath().
3698 *
3699 * @returns VBox status code.
3700 * @param pszFilename The plugin filename to load.
3701 */
3702static int vdPluginLoadFromFilename(const char *pszFilename)
3703{
3704#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3705 /* Plugin loaded? Nothing to do. */
3706 if (vdPluginFind(pszFilename))
3707 return VINF_SUCCESS;
3708
3709 RTLDRMOD hPlugin = NIL_RTLDRMOD;
3710 int rc = SUPR3HardenedLdrLoadPlugIn(pszFilename, &hPlugin, NULL);
3711 if (RT_SUCCESS(rc))
3712 {
3713 VDBACKENDREGISTER BackendRegister;
3714 PFNVDPLUGINLOAD pfnVDPluginLoad = NULL;
3715
3716 BackendRegister.u32Version = VD_BACKENDREG_CB_VERSION;
3717 BackendRegister.pfnRegisterImage = vdPluginRegisterImage;
3718 BackendRegister.pfnRegisterCache = vdPluginRegisterCache;
3719 BackendRegister.pfnRegisterFilter = vdPluginRegisterFilter;
3720
3721 rc = RTLdrGetSymbol(hPlugin, VD_PLUGIN_LOAD_NAME, (void**)&pfnVDPluginLoad);
3722 if (RT_FAILURE(rc) || !pfnVDPluginLoad)
3723 {
3724 LogFunc(("error resolving the entry point %s in plugin %s, rc=%Rrc, pfnVDPluginLoad=%#p\n",
3725 VD_PLUGIN_LOAD_NAME, pszFilename, rc, pfnVDPluginLoad));
3726 if (RT_SUCCESS(rc))
3727 rc = VERR_SYMBOL_NOT_FOUND;
3728 }
3729
3730 if (RT_SUCCESS(rc))
3731 {
3732 /* Get the function table. */
3733 rc = pfnVDPluginLoad(hPlugin, &BackendRegister);
3734 }
3735 else
3736 LogFunc(("ignored plugin '%s': rc=%Rrc\n", pszFilename, rc));
3737
3738 /* Create a plugin entry on success. */
3739 if (RT_SUCCESS(rc))
3740 vdAddPlugin(hPlugin, pszFilename);
3741 else
3742 RTLdrClose(hPlugin);
3743 }
3744
3745 return rc;
3746#else
3747 RT_NOREF1(pszFilename);
3748 return VERR_NOT_IMPLEMENTED;
3749#endif
3750}
3751
/**
 * Worker for VDPluginLoadFromPath() and vdLoadDynamicBackends().
 *
 * Enumerates all files matching the VD plugin prefix in the given directory
 * and tries to load each of them as a plugin.
 *
 * @returns VBox status code.
 * @param   pszPath    The path to load plugins from.
 */
static int vdPluginLoadFromPath(const char *pszPath)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* To get all entries with VBoxHDD as prefix. */
    char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
    if (!pszPluginFilter)
        return VERR_NO_STR_MEMORY;

    PRTDIRENTRYEX pPluginDirEntry = NULL;
    PRTDIR pPluginDir = NULL;
    size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
    int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
    if (RT_SUCCESS(rc))
    {
        pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
        if (pPluginDirEntry)
        {
            while (   (rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK))
                   != VERR_NO_MORE_FILES)
            {
                char *pszPluginPath = NULL;

                if (rc == VERR_BUFFER_OVERFLOW)
                {
                    /* allocate new buffer. */
                    /* RTDirReadEx updated cbPluginDirEntry to the required size. */
                    RTMemFree(pPluginDirEntry);
                    pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
                    if (!pPluginDirEntry)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    /* Retry. */
                    rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
                    if (RT_FAILURE(rc))
                        break;
                }
                else if (RT_FAILURE(rc))
                    break;

                /* We got the new entry. */
                /* Skip anything which is not a regular file (directories, etc.). */
                if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
                    continue;

                /* Prepend the path to the libraries. */
                pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
                if (!pszPluginPath)
                {
                    rc = VERR_NO_STR_MEMORY;
                    break;
                }

                /* Load failures of individual plugins are reflected in rc of the next loop check. */
                rc = vdPluginLoadFromFilename(pszPluginPath);
                RTStrFree(pszPluginPath);
            }

            RTMemFree(pPluginDirEntry);
        }
        else
            rc = VERR_NO_MEMORY;

        RTDirClose(pPluginDir);
    }
    else
    {
        /* On Windows the above immediately signals that there are no
         * files matching, while on other platforms enumerating the
         * files below fails. Either way: no plugins. */
        /* NOTE(review): other open errors propagate to the caller here - confirm intended. */
    }

    if (rc == VERR_NO_MORE_FILES)
        rc = VINF_SUCCESS;
    RTStrFree(pszPluginFilter);
    return rc;
#else
    RT_NOREF1(pszPath);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3837
3838/**
3839 * internal: scans plugin directory and loads found plugins.
3840 */
3841static int vdLoadDynamicBackends(void)
3842{
3843#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3844 /*
3845 * Enumerate plugin backends from the application directory where the other
3846 * shared libraries are.
3847 */
3848 char szPath[RTPATH_MAX];
3849 int rc = RTPathAppPrivateArch(szPath, sizeof(szPath));
3850 if (RT_FAILURE(rc))
3851 return rc;
3852
3853 return vdPluginLoadFromPath(szPath);
3854#else
3855 return VINF_SUCCESS;
3856#endif
3857}
3858
3859/**
3860 * Worker for VDPluginUnloadFromFilename() and vdPluginUnloadFromPath().
3861 *
3862 * @returns VBox status code.
3863 * @param pszFilename The plugin filename to unload.
3864 */
3865static int vdPluginUnloadFromFilename(const char *pszFilename)
3866{
3867#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3868 return vdRemovePlugin(pszFilename);
3869#else
3870 RT_NOREF1(pszFilename);
3871 return VERR_NOT_IMPLEMENTED;
3872#endif
3873}
3874
/**
 * Worker for VDPluginUnloadFromPath().
 *
 * Enumerates all files matching the VD plugin prefix in the given directory
 * and unloads each of them (mirror image of vdPluginLoadFromPath()).
 *
 * @returns VBox status code.
 * @param   pszPath    The path to unload plugins from.
 */
static int vdPluginUnloadFromPath(const char *pszPath)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* To get all entries with VBoxHDD as prefix. */
    char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
    if (!pszPluginFilter)
        return VERR_NO_STR_MEMORY;

    PRTDIRENTRYEX pPluginDirEntry = NULL;
    PRTDIR pPluginDir = NULL;
    size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
    int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
    if (RT_SUCCESS(rc))
    {
        pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
        if (pPluginDirEntry)
        {
            while ((rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK)) != VERR_NO_MORE_FILES)
            {
                char *pszPluginPath = NULL;

                if (rc == VERR_BUFFER_OVERFLOW)
                {
                    /* allocate new buffer. */
                    /* RTDirReadEx updated cbPluginDirEntry to the required size. */
                    RTMemFree(pPluginDirEntry);
                    pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
                    if (!pPluginDirEntry)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    /* Retry. */
                    rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
                    if (RT_FAILURE(rc))
                        break;
                }
                else if (RT_FAILURE(rc))
                    break;

                /* We got the new entry. */
                /* Skip anything which is not a regular file. */
                if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
                    continue;

                /* Prepend the path to the libraries. */
                pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
                if (!pszPluginPath)
                {
                    rc = VERR_NO_STR_MEMORY;
                    break;
                }

                rc = vdPluginUnloadFromFilename(pszPluginPath);
                RTStrFree(pszPluginPath);
            }

            RTMemFree(pPluginDirEntry);
        }
        else
            rc = VERR_NO_MEMORY;

        RTDirClose(pPluginDir);
    }
    else
    {
        /* On Windows the above immediately signals that there are no
         * files matching, while on other platforms enumerating the
         * files below fails. Either way: no plugins. */
    }

    if (rc == VERR_NO_MORE_FILES)
        rc = VINF_SUCCESS;
    RTStrFree(pszPluginFilter);
    return rc;
#else
    RT_NOREF1(pszPath);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3959
3960/**
3961 * VD async I/O interface open callback.
3962 */
3963static DECLCALLBACK(int) vdIOOpenFallback(void *pvUser, const char *pszLocation,
3964 uint32_t fOpen, PFNVDCOMPLETED pfnCompleted,
3965 void **ppStorage)
3966{
3967 RT_NOREF1(pvUser);
3968 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)RTMemAllocZ(sizeof(VDIIOFALLBACKSTORAGE));
3969
3970 if (!pStorage)
3971 return VERR_NO_MEMORY;
3972
3973 pStorage->pfnCompleted = pfnCompleted;
3974
3975 /* Open the file. */
3976 int rc = RTFileOpen(&pStorage->File, pszLocation, fOpen);
3977 if (RT_SUCCESS(rc))
3978 {
3979 *ppStorage = pStorage;
3980 return VINF_SUCCESS;
3981 }
3982
3983 RTMemFree(pStorage);
3984 return rc;
3985}
3986
3987/**
3988 * VD async I/O interface close callback.
3989 */
3990static DECLCALLBACK(int) vdIOCloseFallback(void *pvUser, void *pvStorage)
3991{
3992 RT_NOREF1(pvUser);
3993 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
3994
3995 RTFileClose(pStorage->File);
3996 RTMemFree(pStorage);
3997 return VINF_SUCCESS;
3998}
3999
4000static DECLCALLBACK(int) vdIODeleteFallback(void *pvUser, const char *pcszFilename)
4001{
4002 RT_NOREF1(pvUser);
4003 return RTFileDelete(pcszFilename);
4004}
4005
4006static DECLCALLBACK(int) vdIOMoveFallback(void *pvUser, const char *pcszSrc, const char *pcszDst, unsigned fMove)
4007{
4008 RT_NOREF1(pvUser);
4009 return RTFileMove(pcszSrc, pcszDst, fMove);
4010}
4011
4012static DECLCALLBACK(int) vdIOGetFreeSpaceFallback(void *pvUser, const char *pcszFilename, int64_t *pcbFreeSpace)
4013{
4014 RT_NOREF1(pvUser);
4015 return RTFsQuerySizes(pcszFilename, NULL, pcbFreeSpace, NULL, NULL);
4016}
4017
4018static DECLCALLBACK(int) vdIOGetModificationTimeFallback(void *pvUser, const char *pcszFilename, PRTTIMESPEC pModificationTime)
4019{
4020 RT_NOREF1(pvUser);
4021 RTFSOBJINFO info;
4022 int rc = RTPathQueryInfo(pcszFilename, &info, RTFSOBJATTRADD_NOTHING);
4023 if (RT_SUCCESS(rc))
4024 *pModificationTime = info.ModificationTime;
4025 return rc;
4026}
4027
4028/**
4029 * VD async I/O interface callback for retrieving the file size.
4030 */
4031static DECLCALLBACK(int) vdIOGetSizeFallback(void *pvUser, void *pvStorage, uint64_t *pcbSize)
4032{
4033 RT_NOREF1(pvUser);
4034 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4035
4036 return RTFileGetSize(pStorage->File, pcbSize);
4037}
4038
4039/**
4040 * VD async I/O interface callback for setting the file size.
4041 */
4042static DECLCALLBACK(int) vdIOSetSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize)
4043{
4044 RT_NOREF1(pvUser);
4045 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4046
4047 return RTFileSetSize(pStorage->File, cbSize);
4048}
4049
4050/**
4051 * VD async I/O interface callback for setting the file allocation size.
4052 */
4053static DECLCALLBACK(int) vdIOSetAllocationSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize,
4054 uint32_t fFlags)
4055{
4056 RT_NOREF2(pvUser, fFlags);
4057 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4058
4059 return RTFileSetAllocationSize(pStorage->File, cbSize, RTFILE_ALLOC_SIZE_F_DEFAULT);
4060}
4061
4062/**
4063 * VD async I/O interface callback for a synchronous write to the file.
4064 */
4065static DECLCALLBACK(int) vdIOWriteSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4066 const void *pvBuf, size_t cbWrite, size_t *pcbWritten)
4067{
4068 RT_NOREF1(pvUser);
4069 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4070
4071 return RTFileWriteAt(pStorage->File, uOffset, pvBuf, cbWrite, pcbWritten);
4072}
4073
4074/**
4075 * VD async I/O interface callback for a synchronous read from the file.
4076 */
4077static DECLCALLBACK(int) vdIOReadSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4078 void *pvBuf, size_t cbRead, size_t *pcbRead)
4079{
4080 RT_NOREF1(pvUser);
4081 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4082
4083 return RTFileReadAt(pStorage->File, uOffset, pvBuf, cbRead, pcbRead);
4084}
4085
4086/**
4087 * VD async I/O interface callback for a synchronous flush of the file data.
4088 */
4089static DECLCALLBACK(int) vdIOFlushSyncFallback(void *pvUser, void *pvStorage)
4090{
4091 RT_NOREF1(pvUser);
4092 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4093
4094 return RTFileFlush(pStorage->File);
4095}
4096
4097/**
4098 * VD async I/O interface callback for a asynchronous read from the file.
4099 */
4100static DECLCALLBACK(int) vdIOReadAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4101 PCRTSGSEG paSegments, size_t cSegments,
4102 size_t cbRead, void *pvCompletion,
4103 void **ppTask)
4104{
4105 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbRead, pvCompletion, ppTask);
4106 return VERR_NOT_IMPLEMENTED;
4107}
4108
4109/**
4110 * VD async I/O interface callback for a asynchronous write to the file.
4111 */
4112static DECLCALLBACK(int) vdIOWriteAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4113 PCRTSGSEG paSegments, size_t cSegments,
4114 size_t cbWrite, void *pvCompletion,
4115 void **ppTask)
4116{
4117 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbWrite, pvCompletion, ppTask);
4118 return VERR_NOT_IMPLEMENTED;
4119}
4120
4121/**
4122 * VD async I/O interface callback for a asynchronous flush of the file data.
4123 */
4124static DECLCALLBACK(int) vdIOFlushAsyncFallback(void *pvUser, void *pStorage,
4125 void *pvCompletion, void **ppTask)
4126{
4127 RT_NOREF4(pvUser, pStorage, pvCompletion, ppTask);
4128 return VERR_NOT_IMPLEMENTED;
4129}
4130
/**
 * Internal - Continues an I/O context after
 * it was halted because of an active transfer.
 *
 * Must be called by the disk lock owner.  Propagates a failure status into
 * the context, resumes processing unless the context is blocked, and on
 * completion rolls the result up into the parent context (child contexts
 * exist for writes which grow the image) or completes the root context.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param   pIoCtx  The I/O context to continue.
 * @param   rcReq   Status code of the transfer which completed.
 */
static int vdIoCtxContinue(PVDIOCTX pIoCtx, int rcReq)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /* Record the first failure only; an already stored error is not overwritten. */
    if (RT_FAILURE(rcReq))
        ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);

    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
    {
        /* Continue the transfer */
        rc = vdIoCtxProcessLocked(pIoCtx);

        if (   rc == VINF_VD_ASYNC_IO_FINISHED
            && ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
        {
            LogFlowFunc(("I/O context completed pIoCtx=%#p\n", pIoCtx));
            if (pIoCtx->pIoCtxParent)
            {
                PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;

                /* Only one level of parent/child nesting is supported. */
                Assert(!pIoCtxParent->pIoCtxParent);
                if (RT_FAILURE(pIoCtx->rcReq))
                    ASMAtomicCmpXchgS32(&pIoCtxParent->rcReq, pIoCtx->rcReq, VINF_SUCCESS);

                ASMAtomicDecU32(&pIoCtxParent->cDataTransfersPending);

                if (pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE)
                {
                    LogFlowFunc(("I/O context transferred %u bytes for the parent pIoCtxParent=%p\n",
                                 pIoCtx->Type.Child.cbTransferParent, pIoCtxParent));

                    /* Update the parent state. */
                    Assert(pIoCtxParent->Req.Io.cbTransferLeft >= pIoCtx->Type.Child.cbTransferParent);
                    ASMAtomicSubU32(&pIoCtxParent->Req.Io.cbTransferLeft, (uint32_t)pIoCtx->Type.Child.cbTransferParent);
                }
                else
                    Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH);

                /*
                 * A completed child write means that we finished growing the image.
                 * We have to process any pending writes now.
                 */
                vdIoCtxUnlockDisk(pDisk, pIoCtxParent, false /* fProcessDeferredReqs */);

                /* Unblock the parent */
                pIoCtxParent->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;

                rc = vdIoCtxProcessLocked(pIoCtxParent);

                if (   rc == VINF_VD_ASYNC_IO_FINISHED
                    && ASMAtomicCmpXchgBool(&pIoCtxParent->fComplete, true, false))
                {
                    LogFlowFunc(("Parent I/O context completed pIoCtxParent=%#p rcReq=%Rrc\n", pIoCtxParent, pIoCtxParent->rcReq));
                    vdIoCtxRootComplete(pDisk, pIoCtxParent);
                    vdThreadFinishWrite(pDisk);
                    vdIoCtxFree(pDisk, pIoCtxParent);
                    vdDiskProcessBlockedIoCtx(pDisk);
                }
                else if (!vdIoCtxIsDiskLockOwner(pDisk, pIoCtx))
                {
                    /* Process any pending writes if the current request didn't cause another growing operation. */
                    vdDiskProcessBlockedIoCtx(pDisk);
                }
            }
            else
            {
                /* Root context: release the disk/thread bookkeeping matching the transfer direction. */
                if (pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH)
                {
                    vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs */);
                    vdThreadFinishWrite(pDisk);
                }
                else if (   pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE
                         || pIoCtx->enmTxDir == VDIOCTXTXDIR_DISCARD)
                    vdThreadFinishWrite(pDisk);
                else
                {
                    Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_READ);
                    vdThreadFinishRead(pDisk);
                }

                LogFlowFunc(("I/O context completed pIoCtx=%#p rcReq=%Rrc\n", pIoCtx, pIoCtx->rcReq));
                vdIoCtxRootComplete(pDisk, pIoCtx);
            }

            vdIoCtxFree(pDisk, pIoCtx);
        }
    }

    return VINF_SUCCESS;
}
4228
/**
 * Internal - Called when user transfer completed.
 *
 * Accounts the transferred bytes in the I/O context, invokes the backend
 * completion callback (if any) and resumes processing of the context.
 *
 * @returns VBox status code.
 * @param   pIoStorage   The storage the transfer was directed to.
 * @param   pIoCtx       The I/O context the transfer belongs to.
 * @param   pfnComplete  Backend completion callback, optional.
 * @param   pvUser       Opaque user data for the completion callback.
 * @param   cbTransfer   Number of bytes transferred.
 * @param   rcReq        Status code of the completed request.
 */
static int vdUserXferCompleted(PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
                               PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
                               size_t cbTransfer, int rcReq)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    LogFlowFunc(("pIoStorage=%#p pIoCtx=%#p pfnComplete=%#p pvUser=%#p cbTransfer=%zu rcReq=%Rrc\n",
                 pIoStorage, pIoCtx, pfnComplete, pvUser, cbTransfer, rcReq));

    VD_IS_LOCKED(pDisk);

    /* Account the completed bytes; the assertion checks for truncation in the 64 -> 32 bit cast. */
    Assert(pIoCtx->Req.Io.cbTransferLeft >= cbTransfer);
    ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTransfer); Assert(cbTransfer == (uint32_t)cbTransfer);
    ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);

    if (pfnComplete)
        rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);

    if (RT_SUCCESS(rc))
        rc = vdIoCtxContinue(pIoCtx, rcReq);
    else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        rc = VINF_SUCCESS; /* The backend queued more work; not an error for the caller. */

    return rc;
}
4258
/**
 * Continues all I/O contexts on the given deferred list.
 *
 * Each entry is unlinked and freed, the backend completion callback is
 * invoked and the associated I/O context is resumed with the given status.
 *
 * @returns nothing.
 * @param   pIoStorage    The storage the transfers belong to.
 * @param   pListWaiting  List of deferred I/O contexts to continue.
 * @param   pfnComplete   Backend completion callback, optional.
 * @param   pvUser        Opaque user data for the completion callback.
 * @param   rcReq         Status code of the completed request.
 */
static void vdIoCtxContinueDeferredList(PVDIOSTORAGE pIoStorage, PRTLISTANCHOR pListWaiting,
                                        PFNVDXFERCOMPLETED pfnComplete, void *pvUser, int rcReq)
{
    LogFlowFunc(("pIoStorage=%#p pListWaiting=%#p pfnComplete=%#p pvUser=%#p rcReq=%Rrc\n",
                 pIoStorage, pListWaiting, pfnComplete, pvUser, rcReq));

    /* Go through the waiting list and continue the I/O contexts. */
    while (!RTListIsEmpty(pListWaiting))
    {
        int rc = VINF_SUCCESS;
        PVDIOCTXDEFERRED pDeferred = RTListGetFirst(pListWaiting, VDIOCTXDEFERRED, NodeDeferred);
        PVDIOCTX pIoCtx = pDeferred->pIoCtx;
        RTListNodeRemove(&pDeferred->NodeDeferred);

        /* The deferred entry is only a carrier for the context pointer; free it right away. */
        RTMemFree(pDeferred);
        ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);

        if (pfnComplete)
            rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);

        LogFlow(("Completion callback for I/O context %#p returned %Rrc\n", pIoCtx, rc));

        if (RT_SUCCESS(rc))
        {
            rc = vdIoCtxContinue(pIoCtx, rcReq);
            AssertRC(rc);
        }
        else
            Assert(rc == VERR_VD_ASYNC_IO_IN_PROGRESS);
    }
}
4290
4291/**
4292 * Internal - Called when a meta transfer completed.
4293 */
4294static int vdMetaXferCompleted(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
4295 PVDMETAXFER pMetaXfer, int rcReq)
4296{
4297 PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
4298 RTLISTNODE ListIoCtxWaiting;
4299 bool fFlush;
4300
4301 LogFlowFunc(("pIoStorage=%#p pfnComplete=%#p pvUser=%#p pMetaXfer=%#p rcReq=%Rrc\n",
4302 pIoStorage, pfnComplete, pvUser, pMetaXfer, rcReq));
4303
4304 VD_IS_LOCKED(pDisk);
4305
4306 fFlush = VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_FLUSH;
4307
4308 if (!fFlush)
4309 {
4310 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4311
4312 if (RT_FAILURE(rcReq))
4313 {
4314 /* Remove from the AVL tree. */
4315 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4316 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4317 Assert(fRemoved); NOREF(fRemoved);
4318 /* If this was a write check if there is a shadow buffer with updated data. */
4319 if (pMetaXfer->pbDataShw)
4320 {
4321 Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
4322 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4323 RTListConcatenate(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4324 RTMemFree(pMetaXfer->pbDataShw);
4325 pMetaXfer->pbDataShw = NULL;
4326 }
4327 RTMemFree(pMetaXfer);
4328 }
4329 else
4330 {
4331 /* Increase the reference counter to make sure it doesn't go away before the last context is processed. */
4332 pMetaXfer->cRefs++;
4333 }
4334 }
4335 else
4336 RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);
4337
4338 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4339 vdIoCtxContinueDeferredList(pIoStorage, &ListIoCtxWaiting, pfnComplete, pvUser, rcReq);
4340
4341 /*
4342 * If there is a shadow buffer and the previous write was successful update with the
4343 * new data and trigger a new write.
4344 */
4345 if ( pMetaXfer->pbDataShw
4346 && RT_SUCCESS(rcReq)
4347 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
4348 {
4349 LogFlowFunc(("pMetaXfer=%#p Updating from shadow buffer and triggering new write\n", pMetaXfer));
4350 memcpy(pMetaXfer->abData, pMetaXfer->pbDataShw, pMetaXfer->cbMeta);
4351 RTMemFree(pMetaXfer->pbDataShw);
4352 pMetaXfer->pbDataShw = NULL;
4353 Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
4354
4355 /* Setup a new I/O write. */
4356 PVDIOTASK pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
4357 if (RT_LIKELY(pIoTask))
4358 {
4359 void *pvTask = NULL;
4360 RTSGSEG Seg;
4361
4362 Seg.cbSeg = pMetaXfer->cbMeta;
4363 Seg.pvSeg = pMetaXfer->abData;
4364
4365 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
4366 rcReq = pIoStorage->pVDIo->pInterfaceIo->pfnWriteAsync(pIoStorage->pVDIo->pInterfaceIo->Core.pvUser,
4367 pIoStorage->pStorage,
4368 pMetaXfer->Core.Key, &Seg, 1,
4369 pMetaXfer->cbMeta, pIoTask,
4370 &pvTask);
4371 if ( RT_SUCCESS(rcReq)
4372 || rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4373 {
4374 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
4375 vdIoTaskFree(pDisk, pIoTask);
4376 }
4377 else
4378 RTListMove(&pMetaXfer->ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
4379 }
4380 else
4381 rcReq = VERR_NO_MEMORY;
4382
4383 /* Cleanup if there was an error or the request completed already. */
4384 if (rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
4385 vdIoCtxContinueDeferredList(pIoStorage, &pMetaXfer->ListIoCtxShwWrites, pfnComplete, pvUser, rcReq);
4386 }
4387
4388 /* Remove if not used anymore. */
4389 if (!fFlush)
4390 {
4391 pMetaXfer->cRefs--;
4392 if (!pMetaXfer->cRefs && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting))
4393 {
4394 /* Remove from the AVL tree. */
4395 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
4396 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
4397 Assert(fRemoved); NOREF(fRemoved);
4398 RTMemFree(pMetaXfer);
4399 }
4400 }
4401 else if (fFlush)
4402 RTMemFree(pMetaXfer);
4403
4404 return VINF_SUCCESS;
4405}
4406
4407/**
4408 * Processes a list of waiting I/O tasks. The disk lock must be held by caller.
4409 *
4410 * @returns nothing.
4411 * @param pDisk The disk to process the list for.
4412 */
4413static void vdIoTaskProcessWaitingList(PVBOXHDD pDisk)
4414{
4415 LogFlowFunc(("pDisk=%#p\n", pDisk));
4416
4417 VD_IS_LOCKED(pDisk);
4418
4419 PVDIOTASK pHead = ASMAtomicXchgPtrT(&pDisk->pIoTasksPendingHead, NULL, PVDIOTASK);
4420
4421 Log(("I/O task list cleared\n"));
4422
4423 /* Reverse order. */
4424 PVDIOTASK pCur = pHead;
4425 pHead = NULL;
4426 while (pCur)
4427 {
4428 PVDIOTASK pInsert = pCur;
4429 pCur = pCur->pNext;
4430 pInsert->pNext = pHead;
4431 pHead = pInsert;
4432 }
4433
4434 while (pHead)
4435 {
4436 PVDIOSTORAGE pIoStorage = pHead->pIoStorage;
4437
4438 if (!pHead->fMeta)
4439 vdUserXferCompleted(pIoStorage, pHead->Type.User.pIoCtx,
4440 pHead->pfnComplete, pHead->pvUser,
4441 pHead->Type.User.cbTransfer, pHead->rcReq);
4442 else
4443 vdMetaXferCompleted(pIoStorage, pHead->pfnComplete, pHead->pvUser,
4444 pHead->Type.Meta.pMetaXfer, pHead->rcReq);
4445
4446 pCur = pHead;
4447 pHead = pHead->pNext;
4448 vdIoTaskFree(pDisk, pCur);
4449 }
4450}
4451
4452/**
4453 * Process any I/O context on the halted list.
4454 *
4455 * @returns nothing.
4456 * @param pDisk The disk.
4457 */
4458static void vdIoCtxProcessHaltedList(PVBOXHDD pDisk)
4459{
4460 LogFlowFunc(("pDisk=%#p\n", pDisk));
4461
4462 VD_IS_LOCKED(pDisk);
4463
4464 /* Get the waiting list and process it in FIFO order. */
4465 PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHaltedHead, NULL, PVDIOCTX);
4466
4467 /* Reverse it. */
4468 PVDIOCTX pCur = pIoCtxHead;
4469 pIoCtxHead = NULL;
4470 while (pCur)
4471 {
4472 PVDIOCTX pInsert = pCur;
4473 pCur = pCur->pIoCtxNext;
4474 pInsert->pIoCtxNext = pIoCtxHead;
4475 pIoCtxHead = pInsert;
4476 }
4477
4478 /* Process now. */
4479 pCur = pIoCtxHead;
4480 while (pCur)
4481 {
4482 PVDIOCTX pTmp = pCur;
4483
4484 pCur = pCur->pIoCtxNext;
4485 pTmp->pIoCtxNext = NULL;
4486
4487 /* Continue */
4488 pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
4489 vdIoCtxContinue(pTmp, pTmp->rcReq);
4490 }
4491}
4492
/**
 * Unlock the disk and process pending tasks.
 *
 * @returns VBox status code.
 * @param   pDisk    The disk to unlock.
 * @param   pIoCtxRc The I/O context to get the status code from, optional.
 */
static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /*
     * Process the list of waiting I/O tasks first
     * because they might complete I/O contexts.
     * Same for the list of halted I/O contexts.
     * Afterwards comes the list of new I/O contexts.
     */
    vdIoTaskProcessWaitingList(pDisk);
    vdIoCtxProcessHaltedList(pDisk);
    rc = vdDiskProcessWaitingIoCtx(pDisk, pIoCtxRc);
    ASMAtomicXchgBool(&pDisk->fLocked, false);

    /*
     * Need to check for new I/O tasks and waiting I/O contexts now
     * again as other threads might have added them while we processed
     * the previous lists.
     */
    while (   ASMAtomicUoReadPtrT(&pDisk->pIoCtxHead, PVDIOCTX) != NULL
           || ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK) != NULL
           || ASMAtomicUoReadPtrT(&pDisk->pIoCtxHaltedHead, PVDIOCTX) != NULL)
    {
        /* Try lock disk again. */
        if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
        {
            vdIoTaskProcessWaitingList(pDisk);
            vdIoCtxProcessHaltedList(pDisk);
            vdDiskProcessWaitingIoCtx(pDisk, NULL);
            ASMAtomicXchgBool(&pDisk->fLocked, false);
        }
        else /* Let the thread which won the race process everything when it unlocks the disk. */
            break;
    }

    return rc;
}
4540
/**
 * Try to lock the disk to complete processing of the I/O task.
 * The completion is deferred if the disk is locked already.
 *
 * @returns nothing.
 * @param   pIoTask  The I/O task to complete.
 */
static void vdXferTryLockDiskDeferIoTask(PVDIOTASK pIoTask)
{
    PVDIOSTORAGE pIoStorage = pIoTask->pIoStorage;
    PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;

    Log(("Deferring I/O task pIoTask=%p\n", pIoTask));

    /* Put it on the waiting list (lock-free LIFO push). */
    PVDIOTASK pNext = ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK);
    PVDIOTASK pHeadOld;
    pIoTask->pNext = pNext;
    while (!ASMAtomicCmpXchgExPtr(&pDisk->pIoTasksPendingHead, pIoTask, pNext, &pHeadOld))
    {
        /* Lost the race against another producer; retry with the updated head. */
        pNext = pHeadOld;
        Assert(pNext != pIoTask);
        pIoTask->pNext = pNext;
        ASMNopPause();
    }

    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Release disk lock, it will take care of processing all lists. */
        vdDiskUnlock(pDisk, NULL);
    }
}
4573
4574static DECLCALLBACK(int) vdIOIntReqCompleted(void *pvUser, int rcReq)
4575{
4576 PVDIOTASK pIoTask = (PVDIOTASK)pvUser;
4577
4578 LogFlowFunc(("Task completed pIoTask=%#p\n", pIoTask));
4579
4580 pIoTask->rcReq = rcReq;
4581 vdXferTryLockDiskDeferIoTask(pIoTask);
4582 return VINF_SUCCESS;
4583}
4584
4585/**
4586 * VD I/O interface callback for opening a file.
4587 */
4588static DECLCALLBACK(int) vdIOIntOpen(void *pvUser, const char *pszLocation,
4589 unsigned uOpenFlags, PPVDIOSTORAGE ppIoStorage)
4590{
4591 int rc = VINF_SUCCESS;
4592 PVDIO pVDIo = (PVDIO)pvUser;
4593 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
4594
4595 if (!pIoStorage)
4596 return VERR_NO_MEMORY;
4597
4598 /* Create the AVl tree. */
4599 pIoStorage->pTreeMetaXfers = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
4600 if (pIoStorage->pTreeMetaXfers)
4601 {
4602 rc = pVDIo->pInterfaceIo->pfnOpen(pVDIo->pInterfaceIo->Core.pvUser,
4603 pszLocation, uOpenFlags,
4604 vdIOIntReqCompleted,
4605 &pIoStorage->pStorage);
4606 if (RT_SUCCESS(rc))
4607 {
4608 pIoStorage->pVDIo = pVDIo;
4609 *ppIoStorage = pIoStorage;
4610 return VINF_SUCCESS;
4611 }
4612
4613 RTMemFree(pIoStorage->pTreeMetaXfers);
4614 }
4615 else
4616 rc = VERR_NO_MEMORY;
4617
4618 RTMemFree(pIoStorage);
4619 return rc;
4620}
4621
4622static DECLCALLBACK(int) vdIOIntTreeMetaXferDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
4623{
4624 RT_NOREF2(pNode, pvUser);
4625 AssertMsgFailed(("Tree should be empty at this point!\n"));
4626 return VINF_SUCCESS;
4627}
4628
4629static DECLCALLBACK(int) vdIOIntClose(void *pvUser, PVDIOSTORAGE pIoStorage)
4630{
4631 int rc = VINF_SUCCESS;
4632 PVDIO pVDIo = (PVDIO)pvUser;
4633
4634 /* We free everything here, even if closing the file failed for some reason. */
4635 rc = pVDIo->pInterfaceIo->pfnClose(pVDIo->pInterfaceIo->Core.pvUser, pIoStorage->pStorage);
4636 RTAvlrFileOffsetDestroy(pIoStorage->pTreeMetaXfers, vdIOIntTreeMetaXferDestroy, NULL);
4637 RTMemFree(pIoStorage->pTreeMetaXfers);
4638 RTMemFree(pIoStorage);
4639 return rc;
4640}
4641
4642static DECLCALLBACK(int) vdIOIntDelete(void *pvUser, const char *pcszFilename)
4643{
4644 PVDIO pVDIo = (PVDIO)pvUser;
4645 return pVDIo->pInterfaceIo->pfnDelete(pVDIo->pInterfaceIo->Core.pvUser,
4646 pcszFilename);
4647}
4648
4649static DECLCALLBACK(int) vdIOIntMove(void *pvUser, const char *pcszSrc, const char *pcszDst,
4650 unsigned fMove)
4651{
4652 PVDIO pVDIo = (PVDIO)pvUser;
4653 return pVDIo->pInterfaceIo->pfnMove(pVDIo->pInterfaceIo->Core.pvUser,
4654 pcszSrc, pcszDst, fMove);
4655}
4656
4657static DECLCALLBACK(int) vdIOIntGetFreeSpace(void *pvUser, const char *pcszFilename,
4658 int64_t *pcbFreeSpace)
4659{
4660 PVDIO pVDIo = (PVDIO)pvUser;
4661 return pVDIo->pInterfaceIo->pfnGetFreeSpace(pVDIo->pInterfaceIo->Core.pvUser,
4662 pcszFilename, pcbFreeSpace);
4663}
4664
4665static DECLCALLBACK(int) vdIOIntGetModificationTime(void *pvUser, const char *pcszFilename,
4666 PRTTIMESPEC pModificationTime)
4667{
4668 PVDIO pVDIo = (PVDIO)pvUser;
4669 return pVDIo->pInterfaceIo->pfnGetModificationTime(pVDIo->pInterfaceIo->Core.pvUser,
4670 pcszFilename, pModificationTime);
4671}
4672
4673static DECLCALLBACK(int) vdIOIntGetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4674 uint64_t *pcbSize)
4675{
4676 PVDIO pVDIo = (PVDIO)pvUser;
4677 return pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4678 pIoStorage->pStorage, pcbSize);
4679}
4680
4681static DECLCALLBACK(int) vdIOIntSetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4682 uint64_t cbSize)
4683{
4684 PVDIO pVDIo = (PVDIO)pvUser;
4685 return pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4686 pIoStorage->pStorage, cbSize);
4687}
4688
/**
 * VD I/O interface callback for setting the allocated size of the backing
 * storage.
 *
 * Tries the optimized allocation path of the I/O provider first and falls
 * back to growing the file by writing zero filled chunks (or truncating it)
 * if the provider returns VERR_NOT_SUPPORTED.
 *
 * @returns VBox status code.
 * @param   pvUser         Opaque VDIO pointer.
 * @param   pIoStorage     The storage to resize.
 * @param   cbSize         The new allocation size in bytes.
 * @param   fFlags         Allocation flags, passed through to the provider.
 * @param   pIfProgress    Progress interface, optional.
 * @param   uPercentStart  Progress value at which this operation starts.
 * @param   uPercentSpan   Progress span this operation covers.
 */
static DECLCALLBACK(int) vdIOIntSetAllocationSize(void *pvUser, PVDIOSTORAGE pIoStorage,
                                                  uint64_t cbSize, uint32_t fFlags,
                                                  PVDINTERFACEPROGRESS pIfProgress,
                                                  unsigned uPercentStart, unsigned uPercentSpan)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    int rc = pVDIo->pInterfaceIo->pfnSetAllocationSize(pVDIo->pInterfaceIo->Core.pvUser,
                                                       pIoStorage->pStorage, cbSize, fFlags);
    if (rc == VERR_NOT_SUPPORTED)
    {
        /* Fallback if the underlying medium does not support optimized storage allocation. */
        uint64_t cbSizeCur = 0;
        rc = pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
                                             pIoStorage->pStorage, &cbSizeCur);
        if (RT_SUCCESS(rc))
        {
            if (cbSizeCur < cbSize)
            {
                /* Growing: append zero filled chunks until the requested size is reached. */
                const size_t cbBuf = 128 * _1K;
                void *pvBuf = RTMemTmpAllocZ(cbBuf);
                if (RT_LIKELY(pvBuf))
                {
                    uint64_t cbFill = cbSize - cbSizeCur;
                    uint64_t uOff = 0;

                    /* Write data to all blocks. */
                    while (   uOff < cbFill
                           && RT_SUCCESS(rc))
                    {
                        size_t cbChunk = (size_t)RT_MIN(cbFill - uOff, cbBuf);

                        rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
                                                               pIoStorage->pStorage, cbSizeCur + uOff,
                                                               pvBuf, cbChunk, NULL);
                        if (RT_SUCCESS(rc))
                        {
                            uOff += cbChunk;

                            /* Scale progress linearly over the bytes still to be written. */
                            rc = vdIfProgress(pIfProgress, uPercentStart + uOff * uPercentSpan / cbFill);
                        }
                    }

                    RTMemTmpFree(pvBuf);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (cbSizeCur > cbSize)
                /* Shrinking: a simple truncation suffices. */
                rc = pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
                                                     pIoStorage->pStorage, cbSize);
        }
    }

    if (RT_SUCCESS(rc))
        rc = vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);

    return rc;
}
4747
/**
 * VD I/O interface callback for reading user payload data.
 *
 * For synchronous I/O contexts the single buffer segment is read directly.
 * For asynchronous contexts the request is split into I/O tasks with up to
 * VD_IO_TASK_SEGMENTS_MAX scatter segments each, which are handed to the
 * async I/O provider.
 *
 * @returns VBox status code. VERR_VD_ASYNC_IO_IN_PROGRESS if at least one
 *          task is still outstanding.
 * @param   pvUser      Opaque VDIO pointer.
 * @param   pIoStorage  The storage to read from.
 * @param   uOffset     Byte offset to start reading at.
 * @param   pIoCtx      I/O context supplying the destination S/G buffer.
 * @param   cbRead      Number of bytes to read.
 */
static DECLCALLBACK(int) vdIOIntReadUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                         PVDIOCTX pIoCtx, size_t cbRead)
{
    int rc = VINF_SUCCESS;
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbRead=%u\n",
                 pvUser, pIoStorage, uOffset, pIoCtx, cbRead));

    /** @todo Enable check for sync I/O later. */
    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    Assert(cbRead > 0);

    if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        RTSGSEG Seg;
        unsigned cSegments = 1;
        size_t cbTaskRead = 0;

        /* Synchronous I/O contexts only have one buffer segment. */
        AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
                        ("Invalid number of buffer segments for synchronous I/O context"),
                        VERR_INVALID_PARAMETER);

        cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbRead);
        Assert(cbRead == cbTaskRead);
        Assert(cSegments == 1);
        rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
                                              pIoStorage->pStorage, uOffset,
                                              Seg.pvSeg, cbRead, NULL);
        if (RT_SUCCESS(rc))
        {
            Assert(cbRead == (uint32_t)cbRead);
            ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbRead);
        }
    }
    else
    {
        /* Build the S/G array and spawn a new I/O task */
        while (cbRead)
        {
            RTSGSEG  aSeg[VD_IO_TASK_SEGMENTS_MAX];
            unsigned cSegments  = VD_IO_TASK_SEGMENTS_MAX;
            size_t   cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbRead);

            Assert(cSegments > 0);
            Assert(cbTaskRead > 0);
            AssertMsg(cbTaskRead <= cbRead, ("Invalid number of bytes to read\n"));

            LogFlow(("Reading %u bytes into %u segments\n", cbTaskRead, cSegments));

#ifdef RT_STRICT
            for (unsigned i = 0; i < cSegments; i++)
                AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
                          ("Segment %u is invalid\n", i));
#endif

            Assert(cbTaskRead == (uint32_t)cbTaskRead);
            PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, NULL, NULL, pIoCtx, (uint32_t)cbTaskRead);

            if (!pIoTask)
                return VERR_NO_MEMORY;

            ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);

            void *pvTask;
            Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
            rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                   pIoStorage->pStorage, uOffset,
                                                   aSeg, cSegments, cbTaskRead, pIoTask,
                                                   &pvTask);
            if (RT_SUCCESS(rc))
            {
                /* Completed synchronously; account the bytes and free the task. */
                AssertMsg(cbTaskRead <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
                ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskRead);
                ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
            }
            else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Genuine error; roll back the pending counter and stop. */
                ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
                break;
            }

            uOffset += cbTaskRead;
            cbRead  -= cbTaskRead;
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
4844
4845static DECLCALLBACK(int) vdIOIntWriteUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4846 PVDIOCTX pIoCtx, size_t cbWrite, PFNVDXFERCOMPLETED pfnComplete,
4847 void *pvCompleteUser)
4848{
4849 int rc = VINF_SUCCESS;
4850 PVDIO pVDIo = (PVDIO)pvUser;
4851 PVBOXHDD pDisk = pVDIo->pDisk;
4852
4853 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbWrite=%u\n",
4854 pvUser, pIoStorage, uOffset, pIoCtx, cbWrite));
4855
4856 /** @todo Enable check for sync I/O later. */
4857 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4858 VD_IS_LOCKED(pDisk);
4859
4860 Assert(cbWrite > 0);
4861
4862 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4863 {
4864 RTSGSEG Seg;
4865 unsigned cSegments = 1;
4866 size_t cbTaskWrite = 0;
4867
4868 /* Synchronous I/O contexts only have one buffer segment. */
4869 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4870 ("Invalid number of buffer segments for synchronous I/O context"),
4871 VERR_INVALID_PARAMETER);
4872
4873 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbWrite);
4874 Assert(cbWrite == cbTaskWrite);
4875 Assert(cSegments == 1);
4876 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4877 pIoStorage->pStorage, uOffset,
4878 Seg.pvSeg, cbWrite, NULL);
4879 if (RT_SUCCESS(rc))
4880 {
4881 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbWrite);
4882 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbWrite);
4883 }
4884 }
4885 else
4886 {
4887 /* Build the S/G array and spawn a new I/O task */
4888 while (cbWrite)
4889 {
4890 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4891 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4892 size_t cbTaskWrite = 0;
4893
4894 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbWrite);
4895
4896 Assert(cSegments > 0);
4897 Assert(cbTaskWrite > 0);
4898 AssertMsg(cbTaskWrite <= cbWrite, ("Invalid number of bytes to write\n"));
4899
4900 LogFlow(("Writing %u bytes from %u segments\n", cbTaskWrite, cSegments));
4901
4902#ifdef DEBUG
4903 for (unsigned i = 0; i < cSegments; i++)
4904 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4905 ("Segment %u is invalid\n", i));
4906#endif
4907
4908 Assert(cbTaskWrite == (uint32_t)cbTaskWrite);
4909 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, pfnComplete, pvCompleteUser, pIoCtx, (uint32_t)cbTaskWrite);
4910
4911 if (!pIoTask)
4912 return VERR_NO_MEMORY;
4913
4914 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4915
4916 void *pvTask;
4917 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4918 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
4919 pIoStorage->pStorage,
4920 uOffset, aSeg, cSegments,
4921 cbTaskWrite, pIoTask, &pvTask);
4922 if (RT_SUCCESS(rc))
4923 {
4924 AssertMsg(cbTaskWrite <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4925 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskWrite);
4926 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4927 vdIoTaskFree(pDisk, pIoTask);
4928 }
4929 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4930 {
4931 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4932 vdIoTaskFree(pDisk, pIoTask);
4933 break;
4934 }
4935
4936 uOffset += cbTaskWrite;
4937 cbWrite -= cbTaskWrite;
4938 }
4939 }
4940
4941 LogFlowFunc(("returns rc=%Rrc\n", rc));
4942 return rc;
4943}
4944
/**
 * VD I/O interface callback for reading metadata.
 *
 * Synchronous requests (no I/O context or a sync context) read directly into
 * the caller's buffer. Asynchronous requests are deduplicated through the
 * per-storage AVL tree of metadata transfers keyed by file offset: if a
 * transfer covering the offset is already cached its data is copied,
 * otherwise a new transfer is allocated and submitted. Contexts arriving
 * while the read is still in flight are parked on the transfer's waiting
 * list and resumed from vdMetaXferCompleted().
 *
 * @returns VBox status code. VERR_VD_NOT_ENOUGH_METADATA signals the caller
 *          that the data is not available yet and the context was deferred.
 * @param   pvUser          Opaque VDIO pointer.
 * @param   pIoStorage      The storage to read from.
 * @param   uOffset         Byte offset of the metadata.
 * @param   pvBuf           Destination buffer.
 * @param   cbRead          Number of bytes to read.
 * @param   pIoCtx          I/O context, NULL for synchronous reads.
 * @param   ppMetaXfer      Where to store the metadata transfer handle on success.
 * @param   pfnComplete     Completion callback, optional.
 * @param   pvCompleteUser  Opaque user data for the completion callback.
 */
static DECLCALLBACK(int) vdIOIntReadMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                         void *pvBuf, size_t cbRead, PVDIOCTX pIoCtx,
                                         PPVDMETAXFER ppMetaXfer, PFNVDXFERCOMPLETED pfnComplete,
                                         void *pvCompleteUser)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;
    int rc = VINF_SUCCESS;
    RTSGSEG Seg;
    PVDIOTASK pIoTask;
    PVDMETAXFER pMetaXfer = NULL;
    void *pvTask = NULL;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbRead=%u\n",
                 pvUser, pIoStorage, uOffset, pvBuf, cbRead));

    AssertMsgReturn(   pIoCtx
                    || (!ppMetaXfer && !pfnComplete && !pvCompleteUser),
                    ("A synchronous metadata read is requested but the parameters are wrong\n"),
                    VERR_INVALID_POINTER);

    /** @todo Enable check for sync I/O later. */
    if (   pIoCtx
        && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    if (   !pIoCtx
        || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        /* Handle synchronous metadata I/O. */
        /** @todo Integrate with metadata transfers below. */
        rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
                                              pIoStorage->pStorage, uOffset,
                                              pvBuf, cbRead, NULL);
        if (ppMetaXfer)
            *ppMetaXfer = NULL;
    }
    else
    {
        pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
        if (!pMetaXfer)
        {
#ifdef RT_STRICT
            /* Sanity: the new transfer must not overlap an existing cached one. */
            pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGetBestFit(pIoStorage->pTreeMetaXfers, uOffset, false /* fAbove */);
            AssertMsg(!pMetaXfer || (pMetaXfer->Core.Key + (RTFOFF)pMetaXfer->cbMeta <= (RTFOFF)uOffset),
                      ("Overlapping meta transfers!\n"));
#endif

            /* Allocate a new meta transfer. */
            pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbRead);
            if (!pMetaXfer)
                return VERR_NO_MEMORY;

            pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
            if (!pIoTask)
            {
                RTMemFree(pMetaXfer);
                return VERR_NO_MEMORY;
            }

            Seg.cbSeg = cbRead;
            Seg.pvSeg = pMetaXfer->abData;

            VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_READ);
            rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                   pIoStorage->pStorage,
                                                   uOffset, &Seg, 1,
                                                   cbRead, pIoTask, &pvTask);

            if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Cache the transfer (completed or in-flight) for subsequent readers. */
                bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
                Assert(fInserted); NOREF(fInserted);
            }
            else
                RTMemFree(pMetaXfer);

            if (RT_SUCCESS(rc))
            {
                /* Completed synchronously; no task remains outstanding. */
                VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
                vdIoTaskFree(pDisk, pIoTask);
            }
            else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS && !pfnComplete)
                rc = VERR_VD_NOT_ENOUGH_METADATA;
        }

        Assert(VALID_PTR(pMetaXfer) || RT_FAILURE(rc));

        if (RT_SUCCESS(rc) || rc == VERR_VD_NOT_ENOUGH_METADATA || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            /* If it is pending add the request to the list. */
            if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_READ)
            {
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                /* NOTE(review): an allocation failure is only caught by this
                 * assertion; the NULL pointer would be dereferenced below.
                 * Consider proper VERR_NO_MEMORY handling. */
                AssertPtr(pDeferred);

                RTListInit(&pDeferred->NodeDeferred);
                pDeferred->pIoCtx = pIoCtx;

                ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
                RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
                rc = VERR_VD_NOT_ENOUGH_METADATA;
            }
            else
            {
                /* Transfer the data. */
                pMetaXfer->cRefs++;
                Assert(pMetaXfer->cbMeta >= cbRead);
                Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
                /* Prefer the shadow buffer; it holds newer data not yet written out. */
                if (pMetaXfer->pbDataShw)
                    memcpy(pvBuf, pMetaXfer->pbDataShw, cbRead);
                else
                    memcpy(pvBuf, pMetaXfer->abData, cbRead);
                *ppMetaXfer = pMetaXfer;
            }
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
5066
/**
 * VD I/O interface callback for writing metadata.
 *
 * Synchronous requests (no I/O context, or one with VDIOCTX_FLAGS_SYNC set)
 * go straight to pfnWriteSync.  Asynchronous requests are tracked as metadata
 * transfers in the per-storage AVL tree; a write overlapping a transfer that
 * is still in flight is staged in a shadow buffer and replayed later.
 *
 * @returns VBox status code.
 * @param   pvUser          Opaque user data (PVDIO instance).
 * @param   pIoStorage      The storage handle to write to.
 * @param   uOffset         Byte offset of the metadata in the file.
 * @param   pvBuf           The metadata to write.
 * @param   cbWrite         Number of bytes to write.
 * @param   pIoCtx          The I/O context, NULL for synchronous writes.
 * @param   pfnComplete     Completion callback, optional.
 * @param   pvCompleteUser  Opaque user data for the completion callback.
 */
static DECLCALLBACK(int) vdIOIntWriteMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                          const void *pvBuf, size_t cbWrite, PVDIOCTX pIoCtx,
                                          PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;
    int rc = VINF_SUCCESS;
    RTSGSEG Seg;
    PVDIOTASK pIoTask;
    PVDMETAXFER pMetaXfer = NULL;
    bool fInTree = false;   /* Set when pMetaXfer is already linked into the AVL tree. */
    void *pvTask = NULL;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbWrite=%u\n",
                 pvUser, pIoStorage, uOffset, pvBuf, cbWrite));

    AssertMsgReturn(   pIoCtx
                    || (!pfnComplete && !pvCompleteUser),
                    ("A synchronous metadata write is requested but the parameters are wrong\n"),
                    VERR_INVALID_POINTER);

    /** @todo Enable check for sync I/O later. */
    if (   pIoCtx
        && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    if (   !pIoCtx
        || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        /* Handle synchronous metadata I/O. */
        /** @todo Integrate with metadata transfers below. */
        rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
                                               pIoStorage->pStorage, uOffset,
                                               pvBuf, cbWrite, NULL);
    }
    else
    {
        /* Reuse a transfer already tracked for this offset, if any. */
        pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
        if (!pMetaXfer)
        {
            /* Allocate a new meta transfer. */
            pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbWrite);
            if (!pMetaXfer)
                return VERR_NO_MEMORY;
        }
        else
        {
            Assert(pMetaXfer->cbMeta >= cbWrite);
            Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
            fInTree = true;
        }

        if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
        {
            /* No I/O in flight for this range: start a fresh async write. */
            pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
            if (!pIoTask)
            {
                RTMemFree(pMetaXfer);
                return VERR_NO_MEMORY;
            }

            memcpy(pMetaXfer->abData, pvBuf, cbWrite);
            Seg.cbSeg = cbWrite;
            Seg.pvSeg = pMetaXfer->abData;

            ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);

            VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
            rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                    pIoStorage->pStorage,
                                                    uOffset, &Seg, 1, cbWrite, pIoTask,
                                                    &pvTask);
            if (RT_SUCCESS(rc))
            {
                /* Completed synchronously: undo the pending count and drop the
                 * transfer again unless someone still references it. */
                VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
                ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
                if (fInTree && !pMetaXfer->cRefs)
                {
                    LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
                    bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
                    AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
                    RTMemFree(pMetaXfer);
                    pMetaXfer = NULL;
                }
            }
            else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Write is in flight: register the I/O context as waiting on it. */
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                AssertPtr(pDeferred);

                RTListInit(&pDeferred->NodeDeferred);
                pDeferred->pIoCtx = pIoCtx;

                if (!fInTree)
                {
                    bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
                    Assert(fInserted); NOREF(fInserted);
                }

                RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
            }
            else
            {
                /* NOTE(review): cMetaTransfersPending was incremented above but is
                 * not decremented on this failure path - verify that callers
                 * compensate, otherwise the context may never complete. */
                RTMemFree(pMetaXfer);
                pMetaXfer = NULL;
            }
        }
        else
        {
            /* I/O is in progress, update shadow buffer and add to waiting list. */
            Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
            if (!pMetaXfer->pbDataShw)
            {
                /* Allocate shadow buffer and set initial state. */
                LogFlowFunc(("pMetaXfer=%#p Creating shadow buffer\n", pMetaXfer));
                pMetaXfer->pbDataShw = (uint8_t *)RTMemAlloc(pMetaXfer->cbMeta);
                if (RT_LIKELY(pMetaXfer->pbDataShw))
                    memcpy(pMetaXfer->pbDataShw, pMetaXfer->abData, pMetaXfer->cbMeta);
                else
                    rc = VERR_NO_MEMORY;
            }

            if (RT_SUCCESS(rc))
            {
                /* Update with written data and append to waiting list. */
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                if (pDeferred)
                {
                    LogFlowFunc(("pMetaXfer=%#p Updating shadow buffer\n", pMetaXfer));

                    RTListInit(&pDeferred->NodeDeferred);
                    pDeferred->pIoCtx = pIoCtx;
                    ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
                    memcpy(pMetaXfer->pbDataShw, pvBuf, cbWrite);
                    RTListAppend(&pMetaXfer->ListIoCtxShwWrites, &pDeferred->NodeDeferred);
                }
                else
                {
                    /*
                     * Free shadow buffer if there is no one depending on it, i.e.
                     * we just allocated it.
                     */
                    if (RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites))
                    {
                        RTMemFree(pMetaXfer->pbDataShw);
                        pMetaXfer->pbDataShw = NULL;
                    }
                    rc = VERR_NO_MEMORY;
                }
            }
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
5224
5225static DECLCALLBACK(void) vdIOIntMetaXferRelease(void *pvUser, PVDMETAXFER pMetaXfer)
5226{
5227 PVDIO pVDIo = (PVDIO)pvUser;
5228 PVBOXHDD pDisk = pVDIo->pDisk;
5229 PVDIOSTORAGE pIoStorage;
5230
5231 /*
5232 * It is possible that we get called with a NULL metadata xfer handle
5233 * for synchronous I/O. Just exit.
5234 */
5235 if (!pMetaXfer)
5236 return;
5237
5238 pIoStorage = pMetaXfer->pIoStorage;
5239
5240 VD_IS_LOCKED(pDisk);
5241
5242 Assert( VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE
5243 || VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5244 Assert(pMetaXfer->cRefs > 0);
5245
5246 pMetaXfer->cRefs--;
5247 if ( !pMetaXfer->cRefs
5248 && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting)
5249 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5250 {
5251 /* Free the meta data entry. */
5252 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5253 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5254 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5255
5256 RTMemFree(pMetaXfer);
5257 }
5258}
5259
5260static DECLCALLBACK(int) vdIOIntFlush(void *pvUser, PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
5261 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5262{
5263 PVDIO pVDIo = (PVDIO)pvUser;
5264 PVBOXHDD pDisk = pVDIo->pDisk;
5265 int rc = VINF_SUCCESS;
5266 PVDIOTASK pIoTask;
5267 PVDMETAXFER pMetaXfer = NULL;
5268 void *pvTask = NULL;
5269
5270 LogFlowFunc(("pvUser=%#p pIoStorage=%#p pIoCtx=%#p\n",
5271 pvUser, pIoStorage, pIoCtx));
5272
5273 AssertMsgReturn( pIoCtx
5274 || (!pfnComplete && !pvCompleteUser),
5275 ("A synchronous metadata write is requested but the parameters are wrong\n"),
5276 VERR_INVALID_POINTER);
5277
5278 /** @todo Enable check for sync I/O later. */
5279 if ( pIoCtx
5280 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5281 VD_IS_LOCKED(pDisk);
5282
5283 if (pVDIo->fIgnoreFlush)
5284 return VINF_SUCCESS;
5285
5286 if ( !pIoCtx
5287 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5288 {
5289 /* Handle synchronous flushes. */
5290 /** @todo Integrate with metadata transfers below. */
5291 rc = pVDIo->pInterfaceIo->pfnFlushSync(pVDIo->pInterfaceIo->Core.pvUser,
5292 pIoStorage->pStorage);
5293 }
5294 else
5295 {
5296 /* Allocate a new meta transfer. */
5297 pMetaXfer = vdMetaXferAlloc(pIoStorage, 0, 0);
5298 if (!pMetaXfer)
5299 return VERR_NO_MEMORY;
5300
5301 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
5302 if (!pIoTask)
5303 {
5304 RTMemFree(pMetaXfer);
5305 return VERR_NO_MEMORY;
5306 }
5307
5308 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5309
5310 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5311 AssertPtr(pDeferred);
5312
5313 RTListInit(&pDeferred->NodeDeferred);
5314 pDeferred->pIoCtx = pIoCtx;
5315
5316 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5317 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_FLUSH);
5318 rc = pVDIo->pInterfaceIo->pfnFlushAsync(pVDIo->pInterfaceIo->Core.pvUser,
5319 pIoStorage->pStorage,
5320 pIoTask, &pvTask);
5321 if (RT_SUCCESS(rc))
5322 {
5323 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5324 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5325 vdIoTaskFree(pDisk, pIoTask);
5326 RTMemFree(pDeferred);
5327 RTMemFree(pMetaXfer);
5328 }
5329 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
5330 RTMemFree(pMetaXfer);
5331 }
5332
5333 LogFlowFunc(("returns rc=%Rrc\n", rc));
5334 return rc;
5335}
5336
5337static DECLCALLBACK(size_t) vdIOIntIoCtxCopyTo(void *pvUser, PVDIOCTX pIoCtx,
5338 const void *pvBuf, size_t cbBuf)
5339{
5340 PVDIO pVDIo = (PVDIO)pvUser;
5341 PVBOXHDD pDisk = pVDIo->pDisk;
5342 size_t cbCopied = 0;
5343
5344 /** @todo Enable check for sync I/O later. */
5345 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5346 VD_IS_LOCKED(pDisk);
5347
5348 cbCopied = vdIoCtxCopyTo(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5349 Assert(cbCopied == cbBuf);
5350
5351 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCopied); - triggers with vdCopyHelper/dmgRead.
5352 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5353
5354 return cbCopied;
5355}
5356
5357static DECLCALLBACK(size_t) vdIOIntIoCtxCopyFrom(void *pvUser, PVDIOCTX pIoCtx,
5358 void *pvBuf, size_t cbBuf)
5359{
5360 PVDIO pVDIo = (PVDIO)pvUser;
5361 PVBOXHDD pDisk = pVDIo->pDisk;
5362 size_t cbCopied = 0;
5363
5364 /** @todo Enable check for sync I/O later. */
5365 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5366 VD_IS_LOCKED(pDisk);
5367
5368 cbCopied = vdIoCtxCopyFrom(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5369 Assert(cbCopied == cbBuf);
5370
5371 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft > cbCopied); - triggers with vdCopyHelper/dmgRead.
5372 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5373
5374 return cbCopied;
5375}
5376
5377static DECLCALLBACK(size_t) vdIOIntIoCtxSet(void *pvUser, PVDIOCTX pIoCtx, int ch, size_t cb)
5378{
5379 PVDIO pVDIo = (PVDIO)pvUser;
5380 PVBOXHDD pDisk = pVDIo->pDisk;
5381 size_t cbSet = 0;
5382
5383 /** @todo Enable check for sync I/O later. */
5384 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5385 VD_IS_LOCKED(pDisk);
5386
5387 cbSet = vdIoCtxSet(pIoCtx, ch, cb);
5388 Assert(cbSet == cb);
5389
5390 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbSet); - triggers with vdCopyHelper/dmgRead.
5391 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbSet);
5392
5393 return cbSet;
5394}
5395
5396static DECLCALLBACK(size_t) vdIOIntIoCtxSegArrayCreate(void *pvUser, PVDIOCTX pIoCtx,
5397 PRTSGSEG paSeg, unsigned *pcSeg,
5398 size_t cbData)
5399{
5400 PVDIO pVDIo = (PVDIO)pvUser;
5401 PVBOXHDD pDisk = pVDIo->pDisk;
5402 size_t cbCreated = 0;
5403
5404 /** @todo It is possible that this gets called from a filter plugin
5405 * outside of the disk lock. Refine assertion or remove completely. */
5406#if 0
5407 /** @todo Enable check for sync I/O later. */
5408 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5409 VD_IS_LOCKED(pDisk);
5410#else
5411 NOREF(pDisk);
5412#endif
5413
5414 cbCreated = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, paSeg, pcSeg, cbData);
5415 Assert(!paSeg || cbData == cbCreated);
5416
5417 return cbCreated;
5418}
5419
/**
 * VD I/O interface callback marking part of an I/O context as completed.
 *
 * Records the (first) failure status, accounts the completed bytes and queues
 * the context for further processing under the disk lock.
 *
 * @param   pvUser        Opaque user data (PVDIO instance).
 * @param   pIoCtx        The I/O context the completion belongs to.
 * @param   rcReq         Status code of the completed part.
 * @param   cbCompleted   Number of bytes completed.
 */
static DECLCALLBACK(void) vdIOIntIoCtxCompleted(void *pvUser, PVDIOCTX pIoCtx, int rcReq,
                                                size_t cbCompleted)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;

    LogFlowFunc(("pvUser=%#p pIoCtx=%#p rcReq=%Rrc cbCompleted=%zu\n",
                 pvUser, pIoCtx, rcReq, cbCompleted));

    /*
     * Grab the disk critical section to avoid races with other threads which
     * might still modify the I/O context.
     * Example is that iSCSI is doing an asynchronous write but calls us already
     * while the other thread is still hanging in vdWriteHelperAsync and couldn't update
     * the blocked state yet.
     * It can overwrite the state to true before we call vdIoCtxContinue and the
     * the request would hang indefinite.
     */
    /* Only the first non-success status sticks (CmpXchg against VINF_SUCCESS). */
    ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
    Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCompleted);
    ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCompleted);

    /* Set next transfer function if the current one finished.
     * @todo: Find a better way to prevent vdIoCtxContinue from calling the current helper again. */
    if (!pIoCtx->Req.Io.cbTransferLeft)
    {
        pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
        pIoCtx->pfnIoCtxTransferNext = NULL;
    }

    /* Park the context on the halted list; whoever holds (or now takes) the
     * disk lock will process it when the lock is dropped. */
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxHaltedHead, pIoCtx);
    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Immediately drop the lock again, it will take care of processing the list. */
        vdDiskUnlock(pDisk, NULL);
    }
}
5457
5458static DECLCALLBACK(bool) vdIOIntIoCtxIsSynchronous(void *pvUser, PVDIOCTX pIoCtx)
5459{
5460 NOREF(pvUser);
5461 return !!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC);
5462}
5463
5464static DECLCALLBACK(bool) vdIOIntIoCtxIsZero(void *pvUser, PVDIOCTX pIoCtx, size_t cbCheck,
5465 bool fAdvance)
5466{
5467 NOREF(pvUser);
5468
5469 bool fIsZero = RTSgBufIsZero(&pIoCtx->Req.Io.SgBuf, cbCheck);
5470 if (fIsZero && fAdvance)
5471 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbCheck);
5472
5473 return fIsZero;
5474}
5475
5476static DECLCALLBACK(size_t) vdIOIntIoCtxGetDataUnitSize(void *pvUser, PVDIOCTX pIoCtx)
5477{
5478 RT_NOREF1(pIoCtx);
5479 PVDIO pVDIo = (PVDIO)pvUser;
5480 PVBOXHDD pDisk = pVDIo->pDisk;
5481
5482 PVDIMAGE pImage = vdGetImageByNumber(pDisk, VD_LAST_IMAGE);
5483 AssertPtrReturn(pImage, 0);
5484 return pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
5485}
5486
5487/**
5488 * VD I/O interface callback for opening a file (limited version for VDGetFormat).
5489 */
5490static DECLCALLBACK(int) vdIOIntOpenLimited(void *pvUser, const char *pszLocation,
5491 uint32_t fOpen, PPVDIOSTORAGE ppIoStorage)
5492{
5493 int rc = VINF_SUCCESS;
5494 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5495 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
5496
5497 if (!pIoStorage)
5498 return VERR_NO_MEMORY;
5499
5500 rc = pInterfaceIo->pfnOpen(NULL, pszLocation, fOpen, NULL, &pIoStorage->pStorage);
5501 if (RT_SUCCESS(rc))
5502 *ppIoStorage = pIoStorage;
5503 else
5504 RTMemFree(pIoStorage);
5505
5506 return rc;
5507}
5508
5509static DECLCALLBACK(int) vdIOIntCloseLimited(void *pvUser, PVDIOSTORAGE pIoStorage)
5510{
5511 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5512 int rc = pInterfaceIo->pfnClose(NULL, pIoStorage->pStorage);
5513
5514 RTMemFree(pIoStorage);
5515 return rc;
5516}
5517
5518static DECLCALLBACK(int) vdIOIntDeleteLimited(void *pvUser, const char *pcszFilename)
5519{
5520 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5521 return pInterfaceIo->pfnDelete(NULL, pcszFilename);
5522}
5523
5524static DECLCALLBACK(int) vdIOIntMoveLimited(void *pvUser, const char *pcszSrc,
5525 const char *pcszDst, unsigned fMove)
5526{
5527 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5528 return pInterfaceIo->pfnMove(NULL, pcszSrc, pcszDst, fMove);
5529}
5530
5531static DECLCALLBACK(int) vdIOIntGetFreeSpaceLimited(void *pvUser, const char *pcszFilename,
5532 int64_t *pcbFreeSpace)
5533{
5534 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5535 return pInterfaceIo->pfnGetFreeSpace(NULL, pcszFilename, pcbFreeSpace);
5536}
5537
5538static DECLCALLBACK(int) vdIOIntGetModificationTimeLimited(void *pvUser,
5539 const char *pcszFilename,
5540 PRTTIMESPEC pModificationTime)
5541{
5542 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5543 return pInterfaceIo->pfnGetModificationTime(NULL, pcszFilename, pModificationTime);
5544}
5545
5546static DECLCALLBACK(int) vdIOIntGetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5547 uint64_t *pcbSize)
5548{
5549 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5550 return pInterfaceIo->pfnGetSize(NULL, pIoStorage->pStorage, pcbSize);
5551}
5552
5553static DECLCALLBACK(int) vdIOIntSetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5554 uint64_t cbSize)
5555{
5556 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5557 return pInterfaceIo->pfnSetSize(NULL, pIoStorage->pStorage, cbSize);
5558}
5559
5560static DECLCALLBACK(int) vdIOIntWriteUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5561 uint64_t uOffset, PVDIOCTX pIoCtx,
5562 size_t cbWrite,
5563 PFNVDXFERCOMPLETED pfnComplete,
5564 void *pvCompleteUser)
5565{
5566 NOREF(pvUser);
5567 NOREF(pStorage);
5568 NOREF(uOffset);
5569 NOREF(pIoCtx);
5570 NOREF(cbWrite);
5571 NOREF(pfnComplete);
5572 NOREF(pvCompleteUser);
5573 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5574}
5575
5576static DECLCALLBACK(int) vdIOIntReadUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5577 uint64_t uOffset, PVDIOCTX pIoCtx,
5578 size_t cbRead)
5579{
5580 NOREF(pvUser);
5581 NOREF(pStorage);
5582 NOREF(uOffset);
5583 NOREF(pIoCtx);
5584 NOREF(cbRead);
5585 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5586}
5587
5588static DECLCALLBACK(int) vdIOIntWriteMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5589 uint64_t uOffset, const void *pvBuffer,
5590 size_t cbBuffer, PVDIOCTX pIoCtx,
5591 PFNVDXFERCOMPLETED pfnComplete,
5592 void *pvCompleteUser)
5593{
5594 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5595
5596 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5597 ("Async I/O not implemented for the limited interface"),
5598 VERR_NOT_SUPPORTED);
5599
5600 return pInterfaceIo->pfnWriteSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5601}
5602
5603static DECLCALLBACK(int) vdIOIntReadMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5604 uint64_t uOffset, void *pvBuffer,
5605 size_t cbBuffer, PVDIOCTX pIoCtx,
5606 PPVDMETAXFER ppMetaXfer,
5607 PFNVDXFERCOMPLETED pfnComplete,
5608 void *pvCompleteUser)
5609{
5610 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5611
5612 AssertMsgReturn(!pIoCtx && !ppMetaXfer && !pfnComplete && !pvCompleteUser,
5613 ("Async I/O not implemented for the limited interface"),
5614 VERR_NOT_SUPPORTED);
5615
5616 return pInterfaceIo->pfnReadSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5617}
5618
#if 0 /* unused */
/* Limited-interface variant of the metadata transfer release callback; kept
 * around in case a limited consumer ever needs it. */
static int vdIOIntMetaXferReleaseLimited(void *pvUser, PVDMETAXFER pMetaXfer)
{
    /* This is a NOP in this case. */
    NOREF(pvUser);
    NOREF(pMetaXfer);
    return VINF_SUCCESS;
}
#endif
5628
5629static DECLCALLBACK(int) vdIOIntFlushLimited(void *pvUser, PVDIOSTORAGE pStorage,
5630 PVDIOCTX pIoCtx,
5631 PFNVDXFERCOMPLETED pfnComplete,
5632 void *pvCompleteUser)
5633{
5634 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5635
5636 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5637 ("Async I/O not implemented for the limited interface"),
5638 VERR_NOT_SUPPORTED);
5639
5640 return pInterfaceIo->pfnFlushSync(NULL, pStorage->pStorage);
5641}
5642
5643/**
5644 * internal: send output to the log (unconditionally).
5645 */
5646static DECLCALLBACK(int) vdLogMessage(void *pvUser, const char *pszFormat, va_list args)
5647{
5648 NOREF(pvUser);
5649 RTLogPrintfV(pszFormat, args);
5650 return VINF_SUCCESS;
5651}
5652
5653DECLINLINE(int) vdMessageWrapper(PVBOXHDD pDisk, const char *pszFormat, ...)
5654{
5655 va_list va;
5656 va_start(va, pszFormat);
5657 int rc = pDisk->pInterfaceError->pfnMessage(pDisk->pInterfaceError->Core.pvUser,
5658 pszFormat, va);
5659 va_end(va);
5660 return rc;
5661}
5662
5663
5664/**
5665 * internal: adjust PCHS geometry
5666 */
5667static void vdFixupPCHSGeometry(PVDGEOMETRY pPCHS, uint64_t cbSize)
5668{
5669 /* Fix broken PCHS geometry. Can happen for two reasons: either the backend
5670 * mixes up PCHS and LCHS, or the application used to create the source
5671 * image has put garbage in it. Additionally, if the PCHS geometry covers
5672 * more than the image size, set it back to the default. */
5673 if ( pPCHS->cHeads > 16
5674 || pPCHS->cSectors > 63
5675 || pPCHS->cCylinders == 0
5676 || (uint64_t)pPCHS->cHeads * pPCHS->cSectors * pPCHS->cCylinders * 512 > cbSize)
5677 {
5678 Assert(!(RT_MIN(cbSize / 512 / 16 / 63, 16383) - (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383)));
5679 pPCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383);
5680 pPCHS->cHeads = 16;
5681 pPCHS->cSectors = 63;
5682 }
5683}
5684
5685/**
5686 * internal: adjust PCHS geometry
5687 */
5688static void vdFixupLCHSGeometry(PVDGEOMETRY pLCHS, uint64_t cbSize)
5689{
5690 /* Fix broken LCHS geometry. Can happen for two reasons: either the backend
5691 * mixes up PCHS and LCHS, or the application used to create the source
5692 * image has put garbage in it. The fix in this case is to clear the LCHS
5693 * geometry to trigger autodetection when it is used next. If the geometry
5694 * already says "please autodetect" (cylinders=0) keep it. */
5695 if ( ( pLCHS->cHeads > 255
5696 || pLCHS->cHeads == 0
5697 || pLCHS->cSectors > 63
5698 || pLCHS->cSectors == 0)
5699 && pLCHS->cCylinders != 0)
5700 {
5701 pLCHS->cCylinders = 0;
5702 pLCHS->cHeads = 0;
5703 pLCHS->cSectors = 0;
5704 }
5705 /* Always recompute the number of cylinders stored in the LCHS
5706 * geometry if it isn't set to "autotedetect" at the moment.
5707 * This is very useful if the destination image size is
5708 * larger or smaller than the source image size. Do not modify
5709 * the number of heads and sectors. Windows guests hate it. */
5710 if ( pLCHS->cCylinders != 0
5711 && pLCHS->cHeads != 0 /* paranoia */
5712 && pLCHS->cSectors != 0 /* paranoia */)
5713 {
5714 Assert(!(RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024) - (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024)));
5715 pLCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024);
5716 }
5717}
5718
/**
 * Sets the I/O callbacks of the given interface to the fallback methods
 *
 * @returns nothing.
 * @param pIfIo The I/O interface to setup.
 */
static void vdIfIoFallbackCallbacksSetup(PVDINTERFACEIO pIfIo)
{
    /* File management callbacks. */
    pIfIo->pfnOpen = vdIOOpenFallback;
    pIfIo->pfnClose = vdIOCloseFallback;
    pIfIo->pfnDelete = vdIODeleteFallback;
    pIfIo->pfnMove = vdIOMoveFallback;
    pIfIo->pfnGetFreeSpace = vdIOGetFreeSpaceFallback;
    pIfIo->pfnGetModificationTime = vdIOGetModificationTimeFallback;
    pIfIo->pfnGetSize = vdIOGetSizeFallback;
    pIfIo->pfnSetSize = vdIOSetSizeFallback;
    pIfIo->pfnSetAllocationSize = vdIOSetAllocationSizeFallback;
    /* Synchronous data transfer callbacks. */
    pIfIo->pfnReadSync = vdIOReadSyncFallback;
    pIfIo->pfnWriteSync = vdIOWriteSyncFallback;
    pIfIo->pfnFlushSync = vdIOFlushSyncFallback;
    /* Asynchronous data transfer callbacks. */
    pIfIo->pfnReadAsync = vdIOReadAsyncFallback;
    pIfIo->pfnWriteAsync = vdIOWriteAsyncFallback;
    pIfIo->pfnFlushAsync = vdIOFlushAsyncFallback;
}
5743
/**
 * Sets the internal I/O callbacks of the given interface.
 *
 * @returns nothing.
 * @param pIfIoInt The internal I/O interface to setup.
 */
static void vdIfIoIntCallbacksSetup(PVDINTERFACEIOINT pIfIoInt)
{
    /* File management callbacks. */
    pIfIoInt->pfnOpen = vdIOIntOpen;
    pIfIoInt->pfnClose = vdIOIntClose;
    pIfIoInt->pfnDelete = vdIOIntDelete;
    pIfIoInt->pfnMove = vdIOIntMove;
    pIfIoInt->pfnGetFreeSpace = vdIOIntGetFreeSpace;
    pIfIoInt->pfnGetModificationTime = vdIOIntGetModificationTime;
    pIfIoInt->pfnGetSize = vdIOIntGetSize;
    pIfIoInt->pfnSetSize = vdIOIntSetSize;
    pIfIoInt->pfnSetAllocationSize = vdIOIntSetAllocationSize;
    /* User and metadata transfer callbacks. */
    pIfIoInt->pfnReadUser = vdIOIntReadUser;
    pIfIoInt->pfnWriteUser = vdIOIntWriteUser;
    pIfIoInt->pfnReadMeta = vdIOIntReadMeta;
    pIfIoInt->pfnWriteMeta = vdIOIntWriteMeta;
    pIfIoInt->pfnMetaXferRelease = vdIOIntMetaXferRelease;
    pIfIoInt->pfnFlush = vdIOIntFlush;
    /* I/O context helpers. */
    pIfIoInt->pfnIoCtxCopyFrom = vdIOIntIoCtxCopyFrom;
    pIfIoInt->pfnIoCtxCopyTo = vdIOIntIoCtxCopyTo;
    pIfIoInt->pfnIoCtxSet = vdIOIntIoCtxSet;
    pIfIoInt->pfnIoCtxSegArrayCreate = vdIOIntIoCtxSegArrayCreate;
    pIfIoInt->pfnIoCtxCompleted = vdIOIntIoCtxCompleted;
    pIfIoInt->pfnIoCtxIsSynchronous = vdIOIntIoCtxIsSynchronous;
    pIfIoInt->pfnIoCtxIsZero = vdIOIntIoCtxIsZero;
    pIfIoInt->pfnIoCtxGetDataUnitSize = vdIOIntIoCtxGetDataUnitSize;
}
5776
5777/**
5778 * Internally used completion handler for synchronous I/O contexts.
5779 */
5780static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq)
5781{
5782 RT_NOREF2(pvUser1, rcReq);
5783 RTSEMEVENT hEvent = (RTSEMEVENT)pvUser2;
5784
5785 RTSemEventSignal(hEvent);
5786}
5787
5788/**
5789 * Initializes HDD backends.
5790 *
5791 * @returns VBox status code.
5792 */
5793VBOXDDU_DECL(int) VDInit(void)
5794{
5795 int rc = vdAddBackends(NIL_RTLDRMOD, aStaticBackends, RT_ELEMENTS(aStaticBackends));
5796 if (RT_SUCCESS(rc))
5797 {
5798 rc = vdAddCacheBackends(NIL_RTLDRMOD, aStaticCacheBackends, RT_ELEMENTS(aStaticCacheBackends));
5799 if (RT_SUCCESS(rc))
5800 {
5801 RTListInit(&g_ListPluginsLoaded);
5802 rc = vdLoadDynamicBackends();
5803 }
5804 }
5805 LogRel(("VD: VDInit finished\n"));
5806 return rc;
5807}
5808
/**
 * Destroys loaded HDD backends.
 *
 * @returns VBox status code.
 */
VBOXDDU_DECL(int) VDShutdown(void)
{
    /* Nothing to tear down unless VDInit() ran before. */
    if (!g_apBackends)
        return VERR_INTERNAL_ERROR;

    /* Release the backend arrays themselves. */
    if (g_apCacheBackends)
        RTMemFree(g_apCacheBackends);
    RTMemFree(g_apBackends);

    g_cBackends = 0;
    g_apBackends = NULL;

    /* Clear the supported cache backends. */
    g_cCacheBackends = 0;
    g_apCacheBackends = NULL;

#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* Unload every dynamically loaded plugin and free its list node. */
    PVDPLUGIN pPlugin, pPluginNext;
    RTListForEachSafe(&g_ListPluginsLoaded, pPlugin, pPluginNext, VDPLUGIN, NodePlugin)
    {
        RTLdrClose(pPlugin->hPlugin);
        RTStrFree(pPlugin->pszFilename);
        RTListNodeRemove(&pPlugin->NodePlugin);
        RTMemFree(pPlugin);
    }
#endif

    return VINF_SUCCESS;
}
5843
5844/**
5845 * Loads a single plugin given by filename.
5846 *
5847 * @returns VBox status code.
5848 * @param pszFilename The plugin filename to load.
5849 */
5850VBOXDDU_DECL(int) VDPluginLoadFromFilename(const char *pszFilename)
5851{
5852 if (!g_apBackends)
5853 {
5854 int rc = VDInit();
5855 if (RT_FAILURE(rc))
5856 return rc;
5857 }
5858
5859 return vdPluginLoadFromFilename(pszFilename);
5860}
5861
5862/**
5863 * Load all plugins from a given path.
5864 *
5865 * @returns VBox statuse code.
5866 * @param pszPath The path to load plugins from.
5867 */
5868VBOXDDU_DECL(int) VDPluginLoadFromPath(const char *pszPath)
5869{
5870 if (!g_apBackends)
5871 {
5872 int rc = VDInit();
5873 if (RT_FAILURE(rc))
5874 return rc;
5875 }
5876
5877 return vdPluginLoadFromPath(pszPath);
5878}
5879
5880/**
5881 * Unloads a single plugin given by filename.
5882 *
5883 * @returns VBox status code.
5884 * @param pszFilename The plugin filename to unload.
5885 */
5886VBOXDDU_DECL(int) VDPluginUnloadFromFilename(const char *pszFilename)
5887{
5888 if (!g_apBackends)
5889 {
5890 int rc = VDInit();
5891 if (RT_FAILURE(rc))
5892 return rc;
5893 }
5894
5895 return vdPluginUnloadFromFilename(pszFilename);
5896}
5897
5898/**
5899 * Unload all plugins from a given path.
5900 *
5901 * @returns VBox statuse code.
5902 * @param pszPath The path to unload plugins from.
5903 */
5904VBOXDDU_DECL(int) VDPluginUnloadFromPath(const char *pszPath)
5905{
5906 if (!g_apBackends)
5907 {
5908 int rc = VDInit();
5909 if (RT_FAILURE(rc))
5910 return rc;
5911 }
5912
5913 return vdPluginUnloadFromPath(pszPath);
5914}
5915
/**
 * Lists all HDD backends and their capabilities in a caller-provided buffer.
 *
 * @returns VBox status code.
 *          VERR_BUFFER_OVERFLOW if not enough space is passed.
 * @param   cEntriesAlloc   Number of list entries available.
 * @param   pEntries        Pointer to array for the entries.
 * @param   pcEntriesUsed   Number of entries returned.
 */
VBOXDDU_DECL(int) VDBackendInfo(unsigned cEntriesAlloc, PVDBACKENDINFO pEntries,
                                unsigned *pcEntriesUsed)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
    /* Check arguments. */
    AssertMsgReturn(cEntriesAlloc,
                    ("cEntriesAlloc=%u\n", cEntriesAlloc),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pEntries),
                    ("pEntries=%#p\n", pEntries),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pcEntriesUsed),
                    ("pcEntriesUsed=%#p\n", pcEntriesUsed),
                    VERR_INVALID_PARAMETER);
    /* Lazy init.  NOTE(review): the VDInit() status is ignored here; on failure
     * g_cBackends stays 0 and an empty list is returned - confirm intended. */
    if (!g_apBackends)
        VDInit();

    /* Report the required size when the caller's buffer is too small. */
    if (cEntriesAlloc < g_cBackends)
    {
        *pcEntriesUsed = g_cBackends;
        return VERR_BUFFER_OVERFLOW;
    }

    /* Copy out one info record per registered backend. */
    for (unsigned i = 0; i < g_cBackends; i++)
    {
        pEntries[i].pszBackend = g_apBackends[i]->pszBackendName;
        pEntries[i].uBackendCaps = g_apBackends[i]->uBackendCaps;
        pEntries[i].paFileExtensions = g_apBackends[i]->paFileExtensions;
        pEntries[i].paConfigInfo = g_apBackends[i]->paConfigInfo;
        pEntries[i].pfnComposeLocation = g_apBackends[i]->pfnComposeLocation;
        pEntries[i].pfnComposeName = g_apBackends[i]->pfnComposeName;
    }

    LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cBackends));
    *pcEntriesUsed = g_cBackends;
    return rc;
}
5964
5965/**
5966 * Lists the capabilities of a backend identified by its name.
5967 *
5968 * @returns VBox status code.
5969 * @param pszBackend The backend name.
5970 * @param pEntry Pointer to an entry.
5971 */
5972VBOXDDU_DECL(int) VDBackendInfoOne(const char *pszBackend, PVDBACKENDINFO pEntry)
5973{
5974 LogFlowFunc(("pszBackend=%#p pEntry=%#p\n", pszBackend, pEntry));
5975 /* Check arguments. */
5976 AssertMsgReturn(VALID_PTR(pszBackend),
5977 ("pszBackend=%#p\n", pszBackend),
5978 VERR_INVALID_PARAMETER);
5979 AssertMsgReturn(VALID_PTR(pEntry),
5980 ("pEntry=%#p\n", pEntry),
5981 VERR_INVALID_PARAMETER);
5982 if (!g_apBackends)
5983 VDInit();
5984
5985 /* Go through loaded backends. */
5986 for (unsigned i = 0; i < g_cBackends; i++)
5987 {
5988 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
5989 {
5990 pEntry->pszBackend = g_apBackends[i]->pszBackendName;
5991 pEntry->uBackendCaps = g_apBackends[i]->uBackendCaps;
5992 pEntry->paFileExtensions = g_apBackends[i]->paFileExtensions;
5993 pEntry->paConfigInfo = g_apBackends[i]->paConfigInfo;
5994 return VINF_SUCCESS;
5995 }
5996 }
5997
5998 return VERR_NOT_FOUND;
5999}
6000
/**
 * Lists all filters and their capabilities in a caller-provided buffer.
 *
 * @return  VBox status code.
 *          VERR_BUFFER_OVERFLOW if not enough space is passed.
 * @param   cEntriesAlloc   Number of list entries available.
 * @param   pEntries        Pointer to array for the entries.
 * @param   pcEntriesUsed   Number of entries returned.
 */
VBOXDDU_DECL(int) VDFilterInfo(unsigned cEntriesAlloc, PVDFILTERINFO pEntries,
                               unsigned *pcEntriesUsed)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
    /* Check arguments. */
    AssertMsgReturn(cEntriesAlloc,
                    ("cEntriesAlloc=%u\n", cEntriesAlloc),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pEntries),
                    ("pEntries=%#p\n", pEntries),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pcEntriesUsed),
                    ("pcEntriesUsed=%#p\n", pcEntriesUsed),
                    VERR_INVALID_PARAMETER);
    /* Lazy init.  NOTE(review): the VDInit() status is ignored here, matching
     * VDBackendInfo - confirm intended. */
    if (!g_apBackends)
        VDInit();

    /* Report the required size when the caller's buffer is too small. */
    if (cEntriesAlloc < g_cFilterBackends)
    {
        *pcEntriesUsed = g_cFilterBackends;
        return VERR_BUFFER_OVERFLOW;
    }

    /* Copy out one info record per registered filter backend. */
    for (unsigned i = 0; i < g_cFilterBackends; i++)
    {
        pEntries[i].pszFilter = g_apFilterBackends[i]->pszBackendName;
        pEntries[i].paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
    }

    LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cFilterBackends));
    *pcEntriesUsed = g_cFilterBackends;
    return rc;
}
6045
6046/**
6047 * Lists the capabilities of a filter identified by its name.
6048 *
6049 * @return VBox status code.
6050 * @param pszFilter The filter name (case insensitive).
6051 * @param pEntry Pointer to an entry.
6052 */
6053VBOXDDU_DECL(int) VDFilterInfoOne(const char *pszFilter, PVDFILTERINFO pEntry)
6054{
6055 LogFlowFunc(("pszFilter=%#p pEntry=%#p\n", pszFilter, pEntry));
6056 /* Check arguments. */
6057 AssertMsgReturn(VALID_PTR(pszFilter),
6058 ("pszFilter=%#p\n", pszFilter),
6059 VERR_INVALID_PARAMETER);
6060 AssertMsgReturn(VALID_PTR(pEntry),
6061 ("pEntry=%#p\n", pEntry),
6062 VERR_INVALID_PARAMETER);
6063 if (!g_apBackends)
6064 VDInit();
6065
6066 /* Go through loaded backends. */
6067 for (unsigned i = 0; i < g_cFilterBackends; i++)
6068 {
6069 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
6070 {
6071 pEntry->pszFilter = g_apFilterBackends[i]->pszBackendName;
6072 pEntry->paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6073 return VINF_SUCCESS;
6074 }
6075 }
6076
6077 return VERR_NOT_FOUND;
6078}
6079
6080/**
6081 * Allocates and initializes an empty HDD container.
6082 * No image files are opened.
6083 *
6084 * @returns VBox status code.
6085 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6086 * @param enmType Type of the image container.
6087 * @param ppDisk Where to store the reference to HDD container.
6088 */
6089VBOXDDU_DECL(int) VDCreate(PVDINTERFACE pVDIfsDisk, VDTYPE enmType, PVBOXHDD *ppDisk)
6090{
6091 int rc = VINF_SUCCESS;
6092 PVBOXHDD pDisk = NULL;
6093
6094 LogFlowFunc(("pVDIfsDisk=%#p\n", pVDIfsDisk));
6095 do
6096 {
6097 /* Check arguments. */
6098 AssertMsgBreakStmt(VALID_PTR(ppDisk),
6099 ("ppDisk=%#p\n", ppDisk),
6100 rc = VERR_INVALID_PARAMETER);
6101
6102 pDisk = (PVBOXHDD)RTMemAllocZ(sizeof(VBOXHDD));
6103 if (pDisk)
6104 {
6105 pDisk->u32Signature = VBOXHDDDISK_SIGNATURE;
6106 pDisk->enmType = enmType;
6107 pDisk->cImages = 0;
6108 pDisk->pBase = NULL;
6109 pDisk->pLast = NULL;
6110 pDisk->cbSize = 0;
6111 pDisk->PCHSGeometry.cCylinders = 0;
6112 pDisk->PCHSGeometry.cHeads = 0;
6113 pDisk->PCHSGeometry.cSectors = 0;
6114 pDisk->LCHSGeometry.cCylinders = 0;
6115 pDisk->LCHSGeometry.cHeads = 0;
6116 pDisk->LCHSGeometry.cSectors = 0;
6117 pDisk->pVDIfsDisk = pVDIfsDisk;
6118 pDisk->pInterfaceError = NULL;
6119 pDisk->pInterfaceThreadSync = NULL;
6120 pDisk->pIoCtxLockOwner = NULL;
6121 pDisk->pIoCtxHead = NULL;
6122 pDisk->fLocked = false;
6123 pDisk->hMemCacheIoCtx = NIL_RTMEMCACHE;
6124 pDisk->hMemCacheIoTask = NIL_RTMEMCACHE;
6125 RTListInit(&pDisk->ListFilterChainWrite);
6126 RTListInit(&pDisk->ListFilterChainRead);
6127
6128 /* Create the I/O ctx cache */
6129 rc = RTMemCacheCreate(&pDisk->hMemCacheIoCtx, sizeof(VDIOCTX), 0, UINT32_MAX,
6130 NULL, NULL, NULL, 0);
6131 if (RT_FAILURE(rc))
6132 break;
6133
6134 /* Create the I/O task cache */
6135 rc = RTMemCacheCreate(&pDisk->hMemCacheIoTask, sizeof(VDIOTASK), 0, UINT32_MAX,
6136 NULL, NULL, NULL, 0);
6137 if (RT_FAILURE(rc))
6138 break;
6139
6140 pDisk->pInterfaceError = VDIfErrorGet(pVDIfsDisk);
6141 pDisk->pInterfaceThreadSync = VDIfThreadSyncGet(pVDIfsDisk);
6142
6143 *ppDisk = pDisk;
6144 }
6145 else
6146 {
6147 rc = VERR_NO_MEMORY;
6148 break;
6149 }
6150 } while (0);
6151
6152 if ( RT_FAILURE(rc)
6153 && pDisk)
6154 {
6155 if (pDisk->hMemCacheIoCtx != NIL_RTMEMCACHE)
6156 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6157 if (pDisk->hMemCacheIoTask != NIL_RTMEMCACHE)
6158 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6159 }
6160
6161 LogFlowFunc(("returns %Rrc (pDisk=%#p)\n", rc, pDisk));
6162 return rc;
6163}
6164
6165/**
6166 * Destroys HDD container.
6167 * If container has opened image files they will be closed.
6168 *
6169 * @returns VBox status code.
6170 * @param pDisk Pointer to HDD container.
6171 */
6172VBOXDDU_DECL(int) VDDestroy(PVBOXHDD pDisk)
6173{
6174 int rc = VINF_SUCCESS;
6175 LogFlowFunc(("pDisk=%#p\n", pDisk));
6176 do
6177 {
6178 /* sanity check */
6179 AssertPtrBreak(pDisk);
6180 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6181 Assert(!pDisk->fLocked);
6182
6183 rc = VDCloseAll(pDisk);
6184 int rc2 = VDFilterRemoveAll(pDisk);
6185 if (RT_SUCCESS(rc))
6186 rc = rc2;
6187
6188 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6189 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6190 RTMemFree(pDisk);
6191 } while (0);
6192 LogFlowFunc(("returns %Rrc\n", rc));
6193 return rc;
6194}
6195
6196/**
6197 * Try to get the backend name which can use this image.
6198 *
6199 * @returns VBox status code.
6200 * VINF_SUCCESS if a plugin was found.
6201 * ppszFormat contains the string which can be used as backend name.
6202 * VERR_NOT_SUPPORTED if no backend was found.
6203 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6204 * @param pVDIfsImage Pointer to the per-image VD interface list.
6205 * @param pszFilename Name of the image file for which the backend is queried.
6206 * @param ppszFormat Receives pointer of the UTF-8 string which contains the format name.
6207 * The returned pointer must be freed using RTStrFree().
6208 */
6209VBOXDDU_DECL(int) VDGetFormat(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6210 const char *pszFilename, char **ppszFormat, VDTYPE *penmType)
6211{
6212 int rc = VERR_NOT_SUPPORTED;
6213 VDINTERFACEIOINT VDIfIoInt;
6214 VDINTERFACEIO VDIfIoFallback;
6215 PVDINTERFACEIO pInterfaceIo;
6216
6217 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6218 /* Check arguments. */
6219 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
6220 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6221 VERR_INVALID_PARAMETER);
6222 AssertMsgReturn(VALID_PTR(ppszFormat),
6223 ("ppszFormat=%#p\n", ppszFormat),
6224 VERR_INVALID_PARAMETER);
6225 AssertMsgReturn(VALID_PTR(penmType),
6226 ("penmType=%#p\n", penmType),
6227 VERR_INVALID_PARAMETER);
6228
6229 if (!g_apBackends)
6230 VDInit();
6231
6232 pInterfaceIo = VDIfIoGet(pVDIfsImage);
6233 if (!pInterfaceIo)
6234 {
6235 /*
6236 * Caller doesn't provide an I/O interface, create our own using the
6237 * native file API.
6238 */
6239 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
6240 pInterfaceIo = &VDIfIoFallback;
6241 }
6242
6243 /* Set up the internal I/O interface. */
6244 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
6245 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
6246 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
6247 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
6248 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
6249 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
6250 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
6251 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
6252 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
6253 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
6254 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
6255 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
6256 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
6257 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
6258 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6259 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
6260 AssertRC(rc);
6261
6262 /* Find the backend supporting this file format. */
6263 for (unsigned i = 0; i < g_cBackends; i++)
6264 {
6265 if (g_apBackends[i]->pfnProbe)
6266 {
6267 rc = g_apBackends[i]->pfnProbe(pszFilename, pVDIfsDisk, pVDIfsImage, penmType);
6268 if ( RT_SUCCESS(rc)
6269 /* The correct backend has been found, but there is a small
6270 * incompatibility so that the file cannot be used. Stop here
6271 * and signal success - the actual open will of course fail,
6272 * but that will create a really sensible error message. */
6273 || ( rc != VERR_VD_GEN_INVALID_HEADER
6274 && rc != VERR_VD_VDI_INVALID_HEADER
6275 && rc != VERR_VD_VMDK_INVALID_HEADER
6276 && rc != VERR_VD_ISCSI_INVALID_HEADER
6277 && rc != VERR_VD_VHD_INVALID_HEADER
6278 && rc != VERR_VD_RAW_INVALID_HEADER
6279 && rc != VERR_VD_RAW_SIZE_MODULO_512
6280 && rc != VERR_VD_RAW_SIZE_MODULO_2048
6281 && rc != VERR_VD_RAW_SIZE_OPTICAL_TOO_SMALL
6282 && rc != VERR_VD_RAW_SIZE_FLOPPY_TOO_BIG
6283 && rc != VERR_VD_PARALLELS_INVALID_HEADER
6284 && rc != VERR_VD_DMG_INVALID_HEADER))
6285 {
6286 /* Copy the name into the new string. */
6287 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6288 if (!pszFormat)
6289 {
6290 rc = VERR_NO_MEMORY;
6291 break;
6292 }
6293 *ppszFormat = pszFormat;
6294 /* Do not consider the typical file access errors as success,
6295 * which allows the caller to deal with such issues. */
6296 if ( rc != VERR_ACCESS_DENIED
6297 && rc != VERR_PATH_NOT_FOUND
6298 && rc != VERR_FILE_NOT_FOUND)
6299 rc = VINF_SUCCESS;
6300 break;
6301 }
6302 rc = VERR_NOT_SUPPORTED;
6303 }
6304 }
6305
6306 /* Try the cache backends. */
6307 if (rc == VERR_NOT_SUPPORTED)
6308 {
6309 for (unsigned i = 0; i < g_cCacheBackends; i++)
6310 {
6311 if (g_apCacheBackends[i]->pfnProbe)
6312 {
6313 rc = g_apCacheBackends[i]->pfnProbe(pszFilename, pVDIfsDisk,
6314 pVDIfsImage);
6315 if ( RT_SUCCESS(rc)
6316 || (rc != VERR_VD_GEN_INVALID_HEADER))
6317 {
6318 /* Copy the name into the new string. */
6319 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6320 if (!pszFormat)
6321 {
6322 rc = VERR_NO_MEMORY;
6323 break;
6324 }
6325 *ppszFormat = pszFormat;
6326 rc = VINF_SUCCESS;
6327 break;
6328 }
6329 rc = VERR_NOT_SUPPORTED;
6330 }
6331 }
6332 }
6333
6334 LogFlowFunc(("returns %Rrc *ppszFormat=\"%s\"\n", rc, *ppszFormat));
6335 return rc;
6336}
6337
6338/**
6339 * Opens an image file.
6340 *
6341 * The first opened image file in HDD container must have a base image type,
6342 * others (next opened images) must be a differencing or undo images.
6343 * Linkage is checked for differencing image to be in consistence with the previously opened image.
6344 * When another differencing image is opened and the last image was opened in read/write access
6345 * mode, then the last image is reopened in read-only with deny write sharing mode. This allows
6346 * other processes to use images in read-only mode too.
6347 *
6348 * Note that the image is opened in read-only mode if a read/write open is not possible.
6349 * Use VDIsReadOnly to check open mode.
6350 *
6351 * @returns VBox status code.
6352 * @param pDisk Pointer to HDD container.
6353 * @param pszBackend Name of the image file backend to use.
6354 * @param pszFilename Name of the image file to open.
6355 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6356 * @param pVDIfsImage Pointer to the per-image VD interface list.
6357 */
6358VBOXDDU_DECL(int) VDOpen(PVBOXHDD pDisk, const char *pszBackend,
6359 const char *pszFilename, unsigned uOpenFlags,
6360 PVDINTERFACE pVDIfsImage)
6361{
6362 int rc = VINF_SUCCESS;
6363 int rc2;
6364 bool fLockWrite = false;
6365 PVDIMAGE pImage = NULL;
6366
6367 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsImage=%#p\n",
6368 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsImage));
6369
6370 do
6371 {
6372 /* sanity check */
6373 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6374 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6375
6376 /* Check arguments. */
6377 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6378 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6379 rc = VERR_INVALID_PARAMETER);
6380 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6381 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6382 rc = VERR_INVALID_PARAMETER);
6383 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6384 ("uOpenFlags=%#x\n", uOpenFlags),
6385 rc = VERR_INVALID_PARAMETER);
6386 AssertMsgBreakStmt( !(uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)
6387 || (uOpenFlags & VD_OPEN_FLAGS_READONLY),
6388 ("uOpenFlags=%#x\n", uOpenFlags),
6389 rc = VERR_INVALID_PARAMETER);
6390
6391 /*
6392 * Destroy the current discard state first which might still have pending blocks
6393 * for the currently opened image which will be switched to readonly mode.
6394 */
6395 /* Lock disk for writing, as we modify pDisk information below. */
6396 rc2 = vdThreadStartWrite(pDisk);
6397 AssertRC(rc2);
6398 fLockWrite = true;
6399 rc = vdDiscardStateDestroy(pDisk);
6400 if (RT_FAILURE(rc))
6401 break;
6402 rc2 = vdThreadFinishWrite(pDisk);
6403 AssertRC(rc2);
6404 fLockWrite = false;
6405
6406 /* Set up image descriptor. */
6407 pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
6408 if (!pImage)
6409 {
6410 rc = VERR_NO_MEMORY;
6411 break;
6412 }
6413 pImage->pszFilename = RTStrDup(pszFilename);
6414 if (!pImage->pszFilename)
6415 {
6416 rc = VERR_NO_MEMORY;
6417 break;
6418 }
6419
6420 pImage->VDIo.pDisk = pDisk;
6421 pImage->pVDIfsImage = pVDIfsImage;
6422
6423 rc = vdFindBackend(pszBackend, &pImage->Backend);
6424 if (RT_FAILURE(rc))
6425 break;
6426 if (!pImage->Backend)
6427 {
6428 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6429 N_("VD: unknown backend name '%s'"), pszBackend);
6430 break;
6431 }
6432
6433 /*
6434 * Fail if the backend can't do async I/O but the
6435 * flag is set.
6436 */
6437 if ( !(pImage->Backend->uBackendCaps & VD_CAP_ASYNC)
6438 && (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO))
6439 {
6440 rc = vdError(pDisk, VERR_NOT_SUPPORTED, RT_SRC_POS,
6441 N_("VD: Backend '%s' does not support async I/O"), pszBackend);
6442 break;
6443 }
6444
6445 /*
6446 * Fail if the backend doesn't support the discard operation but the
6447 * flag is set.
6448 */
6449 if ( !(pImage->Backend->uBackendCaps & VD_CAP_DISCARD)
6450 && (uOpenFlags & VD_OPEN_FLAGS_DISCARD))
6451 {
6452 rc = vdError(pDisk, VERR_VD_DISCARD_NOT_SUPPORTED, RT_SRC_POS,
6453 N_("VD: Backend '%s' does not support discard"), pszBackend);
6454 break;
6455 }
6456
6457 /* Set up the I/O interface. */
6458 pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
6459 if (!pImage->VDIo.pInterfaceIo)
6460 {
6461 vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
6462 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6463 pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
6464 pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
6465 }
6466
6467 /* Set up the internal I/O interface. */
6468 AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
6469 vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
6470 rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6471 &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
6472 AssertRC(rc);
6473
6474 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
6475 pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
6476 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6477 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6478 pDisk->pVDIfsDisk,
6479 pImage->pVDIfsImage,
6480 pDisk->enmType,
6481 &pImage->pBackendData);
6482 /*
6483 * If the image is corrupted and there is a repair method try to repair it
6484 * first if it was openend in read-write mode and open again afterwards.
6485 */
6486 if ( RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED)
6487 && !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6488 && pImage->Backend->pfnRepair)
6489 {
6490 rc = pImage->Backend->pfnRepair(pszFilename, pDisk->pVDIfsDisk, pImage->pVDIfsImage, 0 /* fFlags */);
6491 if (RT_SUCCESS(rc))
6492 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6493 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
6494 pDisk->pVDIfsDisk,
6495 pImage->pVDIfsImage,
6496 pDisk->enmType,
6497 &pImage->pBackendData);
6498 else
6499 {
6500 rc = vdError(pDisk, rc, RT_SRC_POS,
6501 N_("VD: error %Rrc repairing corrupted image file '%s'"), rc, pszFilename);
6502 break;
6503 }
6504 }
6505 else if (RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED))
6506 {
6507 rc = vdError(pDisk, rc, RT_SRC_POS,
6508 N_("VD: Image file '%s' is corrupted and can't be opened"), pszFilename);
6509 break;
6510 }
6511
6512 /* If the open in read-write mode failed, retry in read-only mode. */
6513 if (RT_FAILURE(rc))
6514 {
6515 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6516 && ( rc == VERR_ACCESS_DENIED
6517 || rc == VERR_PERMISSION_DENIED
6518 || rc == VERR_WRITE_PROTECT
6519 || rc == VERR_SHARING_VIOLATION
6520 || rc == VERR_FILE_LOCK_FAILED))
6521 rc = pImage->Backend->pfnOpen(pImage->pszFilename,
6522 (uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS))
6523 | VD_OPEN_FLAGS_READONLY,
6524 pDisk->pVDIfsDisk,
6525 pImage->pVDIfsImage,
6526 pDisk->enmType,
6527 &pImage->pBackendData);
6528 if (RT_FAILURE(rc))
6529 {
6530 rc = vdError(pDisk, rc, RT_SRC_POS,
6531 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6532 break;
6533 }
6534 }
6535
6536 /* Lock disk for writing, as we modify pDisk information below. */
6537 rc2 = vdThreadStartWrite(pDisk);
6538 AssertRC(rc2);
6539 fLockWrite = true;
6540
6541 pImage->VDIo.pBackendData = pImage->pBackendData;
6542
6543 /* Check image type. As the image itself has only partial knowledge
6544 * whether it's a base image or not, this info is derived here. The
6545 * base image can be fixed or normal, all others must be normal or
6546 * diff images. Some image formats don't distinguish between normal
6547 * and diff images, so this must be corrected here. */
6548 unsigned uImageFlags;
6549 uImageFlags = pImage->Backend->pfnGetImageFlags(pImage->pBackendData);
6550 if (RT_FAILURE(rc))
6551 uImageFlags = VD_IMAGE_FLAGS_NONE;
6552 if ( RT_SUCCESS(rc)
6553 && !(uOpenFlags & VD_OPEN_FLAGS_INFO))
6554 {
6555 if ( pDisk->cImages == 0
6556 && (uImageFlags & VD_IMAGE_FLAGS_DIFF))
6557 {
6558 rc = VERR_VD_INVALID_TYPE;
6559 break;
6560 }
6561 else if (pDisk->cImages != 0)
6562 {
6563 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6564 {
6565 rc = VERR_VD_INVALID_TYPE;
6566 break;
6567 }
6568 else
6569 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6570 }
6571 }
6572
6573 /* Ensure we always get correct diff information, even if the backend
6574 * doesn't actually have a stored flag for this. It must not return
6575 * bogus information for the parent UUID if it is not a diff image. */
6576 RTUUID parentUuid;
6577 RTUuidClear(&parentUuid);
6578 rc2 = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, &parentUuid);
6579 if (RT_SUCCESS(rc2) && !RTUuidIsNull(&parentUuid))
6580 uImageFlags |= VD_IMAGE_FLAGS_DIFF;
6581
6582 pImage->uImageFlags = uImageFlags;
6583
6584 /* Force sane optimization settings. It's not worth avoiding writes
6585 * to fixed size images. The overhead would have almost no payback. */
6586 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
6587 pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;
6588
6589 /** @todo optionally check UUIDs */
6590
6591 /* Cache disk information. */
6592 pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
6593
6594 /* Cache PCHS geometry. */
6595 rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
6596 &pDisk->PCHSGeometry);
6597 if (RT_FAILURE(rc2))
6598 {
6599 pDisk->PCHSGeometry.cCylinders = 0;
6600 pDisk->PCHSGeometry.cHeads = 0;
6601 pDisk->PCHSGeometry.cSectors = 0;
6602 }
6603 else
6604 {
6605 /* Make sure the PCHS geometry is properly clipped. */
6606 pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
6607 pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
6608 pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
6609 }
6610
6611 /* Cache LCHS geometry. */
6612 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
6613 &pDisk->LCHSGeometry);
6614 if (RT_FAILURE(rc2))
6615 {
6616 pDisk->LCHSGeometry.cCylinders = 0;
6617 pDisk->LCHSGeometry.cHeads = 0;
6618 pDisk->LCHSGeometry.cSectors = 0;
6619 }
6620 else
6621 {
6622 /* Make sure the LCHS geometry is properly clipped. */
6623 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
6624 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
6625 }
6626
6627 if (pDisk->cImages != 0)
6628 {
6629 /* Switch previous image to read-only mode. */
6630 unsigned uOpenFlagsPrevImg;
6631 uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
6632 if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
6633 {
6634 uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
6635 rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
6636 }
6637 }
6638
6639 if (RT_SUCCESS(rc))
6640 {
6641 /* Image successfully opened, make it the last image. */
6642 vdAddImageToList(pDisk, pImage);
6643 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
6644 pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
6645 }
6646 else
6647 {
6648 /* Error detected, but image opened. Close image. */
6649 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
6650 AssertRC(rc2);
6651 pImage->pBackendData = NULL;
6652 }
6653 } while (0);
6654
6655 if (RT_UNLIKELY(fLockWrite))
6656 {
6657 rc2 = vdThreadFinishWrite(pDisk);
6658 AssertRC(rc2);
6659 }
6660
6661 if (RT_FAILURE(rc))
6662 {
6663 if (pImage)
6664 {
6665 if (pImage->pszFilename)
6666 RTStrFree(pImage->pszFilename);
6667 RTMemFree(pImage);
6668 }
6669 }
6670
6671 LogFlowFunc(("returns %Rrc\n", rc));
6672 return rc;
6673}
6674
6675/**
6676 * Opens a cache image.
6677 *
6678 * @return VBox status code.
6679 * @param pDisk Pointer to the HDD container which should use the cache image.
6680 * @param pszBackend Name of the cache file backend to use (case insensitive).
6681 * @param pszFilename Name of the cache image to open.
6682 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
6683 * @param pVDIfsCache Pointer to the per-cache VD interface list.
6684 */
6685VBOXDDU_DECL(int) VDCacheOpen(PVBOXHDD pDisk, const char *pszBackend,
6686 const char *pszFilename, unsigned uOpenFlags,
6687 PVDINTERFACE pVDIfsCache)
6688{
6689 int rc = VINF_SUCCESS;
6690 int rc2;
6691 bool fLockWrite = false;
6692 PVDCACHE pCache = NULL;
6693
6694 LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsCache=%#p\n",
6695 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsCache));
6696
6697 do
6698 {
6699 /* sanity check */
6700 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
6701 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6702
6703 /* Check arguments. */
6704 AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
6705 ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
6706 rc = VERR_INVALID_PARAMETER);
6707 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
6708 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6709 rc = VERR_INVALID_PARAMETER);
6710 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
6711 ("uOpenFlags=%#x\n", uOpenFlags),
6712 rc = VERR_INVALID_PARAMETER);
6713
6714 /* Set up image descriptor. */
6715 pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
6716 if (!pCache)
6717 {
6718 rc = VERR_NO_MEMORY;
6719 break;
6720 }
6721 pCache->pszFilename = RTStrDup(pszFilename);
6722 if (!pCache->pszFilename)
6723 {
6724 rc = VERR_NO_MEMORY;
6725 break;
6726 }
6727
6728 pCache->VDIo.pDisk = pDisk;
6729 pCache->pVDIfsCache = pVDIfsCache;
6730
6731 rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
6732 if (RT_FAILURE(rc))
6733 break;
6734 if (!pCache->Backend)
6735 {
6736 rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
6737 N_("VD: unknown backend name '%s'"), pszBackend);
6738 break;
6739 }
6740
6741 /* Set up the I/O interface. */
6742 pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
6743 if (!pCache->VDIo.pInterfaceIo)
6744 {
6745 vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
6746 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
6747 pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
6748 pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
6749 }
6750
6751 /* Set up the internal I/O interface. */
6752 AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
6753 vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
6754 rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6755 &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
6756 AssertRC(rc);
6757
6758 pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
6759 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6760 uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
6761 pDisk->pVDIfsDisk,
6762 pCache->pVDIfsCache,
6763 &pCache->pBackendData);
6764 /* If the open in read-write mode failed, retry in read-only mode. */
6765 if (RT_FAILURE(rc))
6766 {
6767 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
6768 && ( rc == VERR_ACCESS_DENIED
6769 || rc == VERR_PERMISSION_DENIED
6770 || rc == VERR_WRITE_PROTECT
6771 || rc == VERR_SHARING_VIOLATION
6772 || rc == VERR_FILE_LOCK_FAILED))
6773 rc = pCache->Backend->pfnOpen(pCache->pszFilename,
6774 (uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME)
6775 | VD_OPEN_FLAGS_READONLY,
6776 pDisk->pVDIfsDisk,
6777 pCache->pVDIfsCache,
6778 &pCache->pBackendData);
6779 if (RT_FAILURE(rc))
6780 {
6781 rc = vdError(pDisk, rc, RT_SRC_POS,
6782 N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
6783 break;
6784 }
6785 }
6786
6787 /* Lock disk for writing, as we modify pDisk information below. */
6788 rc2 = vdThreadStartWrite(pDisk);
6789 AssertRC(rc2);
6790 fLockWrite = true;
6791
6792 /*
6793 * Check that the modification UUID of the cache and last image
6794 * match. If not the image was modified in-between without the cache.
6795 * The cache might contain stale data.
6796 */
6797 RTUUID UuidImage, UuidCache;
6798
6799 rc = pCache->Backend->pfnGetModificationUuid(pCache->pBackendData,
6800 &UuidCache);
6801 if (RT_SUCCESS(rc))
6802 {
6803 rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
6804 &UuidImage);
6805 if (RT_SUCCESS(rc))
6806 {
6807 if (RTUuidCompare(&UuidImage, &UuidCache))
6808 rc = VERR_VD_CACHE_NOT_UP_TO_DATE;
6809 }
6810 }
6811
6812 /*
6813 * We assume that the user knows what he is doing if one of the images
6814 * doesn't support the modification uuid.
6815 */
6816 if (rc == VERR_NOT_SUPPORTED)
6817 rc = VINF_SUCCESS;
6818
6819 if (RT_SUCCESS(rc))
6820 {
6821 /* Cache successfully opened, make it the current one. */
6822 if (!pDisk->pCache)
6823 pDisk->pCache = pCache;
6824 else
6825 rc = VERR_VD_CACHE_ALREADY_EXISTS;
6826 }
6827
6828 if (RT_FAILURE(rc))
6829 {
6830 /* Error detected, but image opened. Close image. */
6831 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
6832 AssertRC(rc2);
6833 pCache->pBackendData = NULL;
6834 }
6835 } while (0);
6836
6837 if (RT_UNLIKELY(fLockWrite))
6838 {
6839 rc2 = vdThreadFinishWrite(pDisk);
6840 AssertRC(rc2);
6841 }
6842
6843 if (RT_FAILURE(rc))
6844 {
6845 if (pCache)
6846 {
6847 if (pCache->pszFilename)
6848 RTStrFree(pCache->pszFilename);
6849 RTMemFree(pCache);
6850 }
6851 }
6852
6853 LogFlowFunc(("returns %Rrc\n", rc));
6854 return rc;
6855}
6856
/**
 * Adds a filter to the disk's read and/or write filter chains.
 *
 * The filter backend is looked up by name, instantiated, and appended to the
 * chains selected by @a fFlags.  One reference is retained per chain the
 * filter is linked into.
 *
 * @returns VBox status code.
 * @param   pDisk           Pointer to the HDD container.
 * @param   pszFilter       Name of the filter backend to use (case insensitive).
 * @param   fFlags          Filter flags (VD_FILTER_FLAGS_*), selecting the
 *                          read and/or write chain.
 * @param   pVDIfsFilter    Pointer to the per-filter VD interface list.
 */
VBOXDDU_DECL(int) VDFilterAdd(PVBOXHDD pDisk, const char *pszFilter, uint32_t fFlags,
                              PVDINTERFACE pVDIfsFilter)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;   /* Tracks whether the disk write lock is held so the epilogue can release it. */
    PVDFILTER pFilter = NULL;

    LogFlowFunc(("pDisk=%#p pszFilter=\"%s\" pVDIfsFilter=%#p\n",
                 pDisk, pszFilter, pVDIfsFilter));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszFilter) && *pszFilter,
                           ("pszFilter=%#p \"%s\"\n", pszFilter, pszFilter),
                           rc = VERR_INVALID_PARAMETER);

        AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
                           ("Invalid flags set (fFlags=%#x)\n", fFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Set up image descriptor. */
        pFilter = (PVDFILTER)RTMemAllocZ(sizeof(VDFILTER));
        if (!pFilter)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdFindFilterBackend(pszFilter, &pFilter->pBackend);
        if (RT_FAILURE(rc))
            break;
        if (!pFilter->pBackend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown filter backend name '%s'"), pszFilter);
            break;
        }

        pFilter->VDIo.pDisk   = pDisk;
        pFilter->pVDIfsFilter = pVDIfsFilter;

        /* Set up the internal I/O interface. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsFilter), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pFilter->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pFilter->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pFilter->VDIo, sizeof(VDINTERFACEIOINT), &pFilter->pVDIfsFilter);
        AssertRC(rc);

        /* Instantiate the filter backend. */
        rc = pFilter->pBackend->pfnCreate(pDisk->pVDIfsDisk, fFlags & VD_FILTER_FLAGS_INFO,
                                          pFilter->pVDIfsFilter, &pFilter->pvBackendData);
        if (RT_FAILURE(rc))
            break;

        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Add filter to chains.  One reference is taken per chain the filter
         * is linked into.
         * NOTE(review): if fFlags selects neither READ nor WRITE, the created
         * backend instance is never linked or released - confirm whether
         * callers can legally pass such a flag combination. */
        if (fFlags & VD_FILTER_FLAGS_WRITE)
        {
            RTListAppend(&pDisk->ListFilterChainWrite, &pFilter->ListNodeChainWrite);
            vdFilterRetain(pFilter);
        }

        if (fFlags & VD_FILTER_FLAGS_READ)
        {
            RTListAppend(&pDisk->ListFilterChainRead, &pFilter->ListNodeChainRead);
            vdFilterRetain(pFilter);
        }
    } while (0);

    /* Release the write lock if it was taken above. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the filter descriptor. */
    if (RT_FAILURE(rc))
    {
        if (pFilter)
            RTMemFree(pFilter);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6950
/**
 * Creates and opens a new base image file.
 *
 * Must be called with no other images attached to the container; the new
 * image becomes the first (and last) image in the chain.
 *
 * @returns VBox status code.
 * @param pDisk Pointer to HDD container.
 * @param pszBackend Name of the image file backend to use.
 * @param pszFilename Name of the image file to create.
 * @param cbSize Image size in bytes. Must be a multiple of 512.
 * @param uImageFlags Flags specifying special image features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pPCHSGeometry Pointer to physical disk geometry <= (16383,16,63). Not NULL.
 * @param pLCHSGeometry Pointer to logical disk geometry <= (x,255,63). Not NULL.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsImage Pointer to the per-image VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateBase(PVBOXHDD pDisk, const char *pszBackend,
                               const char *pszFilename, uint64_t cbSize,
                               unsigned uImageFlags, const char *pszComment,
                               PCVDGEOMETRY pPCHSGeometry,
                               PCVDGEOMETRY pLCHSGeometry,
                               PCRTUUID pUuid, unsigned uOpenFlags,
                               PVDINTERFACE pVDIfsImage,
                               PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false, fLockRead = false; /* Track which lock we hold so the epilogue can release it. */
    PVDIMAGE pImage = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" PCHS=%u/%u/%u LCHS=%u/%u/%u Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment,
                 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                 pPCHSGeometry->cSectors, pLCHSGeometry->cCylinders,
                 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors, pUuid,
                 uOpenFlags, pVDIfsImage, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* do/while(0) so error paths can 'break' straight to the common cleanup below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbSize,
                           ("cbSize=%llu\n", cbSize),
                           rc = VERR_INVALID_PARAMETER);
        /* Disk sizes must be aligned on a 512 byte sector boundary. */
        if (cbSize % 512)
        {
            rc = vdError(pDisk, VERR_VD_INVALID_SIZE, RT_SRC_POS,
                         N_("VD: The given disk size %llu is not aligned on a sector boundary (512 bytes)"), cbSize);
            break;
        }
        AssertMsgBreakStmt(   ((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0)
                           || ((uImageFlags & (VD_IMAGE_FLAGS_FIXED | VD_IMAGE_FLAGS_DIFF)) != VD_IMAGE_FLAGS_FIXED),
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The PCHS geometry fields may be 0 to leave it for later. */
        AssertMsgBreakStmt(   VALID_PTR(pPCHSGeometry)
                           && pPCHSGeometry->cHeads <= 16
                           && pPCHSGeometry->cSectors <= 63,
                           ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
                            pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                            pPCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);
        /* The LCHS geometry fields may be 0 to leave it to later autodetection. */
        AssertMsgBreakStmt(   VALID_PTR(pLCHSGeometry)
                           && pLCHSGeometry->cHeads <= 255
                           && pLCHSGeometry->cSectors <= 63,
                           ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
                            pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
                            pLCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(pDisk->cImages == 0,
                           ("Create base image cannot be done with other images open\n"),
                           rc = VERR_VD_INVALID_STATE);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /* Set up image descriptor. */
        pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
        if (!pImage)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->pszFilename = RTStrDup(pszFilename);
        if (!pImage->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->VDIo.pDisk  = pDisk;
        pImage->pVDIfsImage = pVDIfsImage;

        /* Set up the I/O interface. Use the caller supplied one if present,
         * otherwise install the built-in fallback callbacks. */
        pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
        if (!pImage->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
            pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
        AssertRC(rc);

        rc = vdFindBackend(pszBackend, &pImage->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pImage->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }
        if (!(pImage->Backend->uBackendCaps & (  VD_CAP_CREATE_FIXED
                                               | VD_CAP_CREATE_DYNAMIC)))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' cannot create base images"), pszBackend);
            break;
        }
        /* Split-2G and stream-optimized variants are VMDK specific. */
        if (   (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
                && !(pImage->Backend->uBackendCaps & VD_CAP_CREATE_SPLIT_2G))
            || (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                && RTStrICmp(pszBackend, "VMDK")))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' does not support the selected image variant"), pszBackend);
            break;
        }

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        /* A freshly created base image can never be a diff image. */
        uImageFlags &= ~VD_IMAGE_FLAGS_DIFF;
        pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        /* Create the image; progress range 0..99 leaves the final percent for
         * the completion notification at the end of this function. */
        rc = pImage->Backend->pfnCreate(pImage->pszFilename, cbSize,
                                        uImageFlags, pszComment, pPCHSGeometry,
                                        pLCHSGeometry, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pImage->pVDIfsImage,
                                        pVDIfsOperation,
                                        pDisk->enmType,
                                        &pImage->pBackendData);

        if (RT_SUCCESS(rc))
        {
            pImage->VDIo.pBackendData = pImage->pBackendData;
            pImage->uImageFlags       = uImageFlags;

            /* Force sane optimization settings. It's not worth avoiding writes
             * to fixed size images. The overhead would have almost no payback. */
            if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
                pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;

            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            /** @todo optionally check UUIDs */

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(pDisk->cImages == 0,
                          ("Create base image cannot be done with other images open\n"),
                          rc = VERR_VD_INVALID_STATE);
        }

        if (RT_SUCCESS(rc))
        {
            /* Cache disk information. */
            pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);

            /* Cache PCHS geometry. */
            rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                      &pDisk->PCHSGeometry);
            if (RT_FAILURE(rc2))
            {
                pDisk->PCHSGeometry.cCylinders = 0;
                pDisk->PCHSGeometry.cHeads     = 0;
                pDisk->PCHSGeometry.cSectors   = 0;
            }
            else
            {
                /* Make sure the CHS geometry is properly clipped. */
                pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
                pDisk->PCHSGeometry.cHeads     = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
                pDisk->PCHSGeometry.cSectors   = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
            }

            /* Cache LCHS geometry. */
            rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                      &pDisk->LCHSGeometry);
            if (RT_FAILURE(rc2))
            {
                pDisk->LCHSGeometry.cCylinders = 0;
                pDisk->LCHSGeometry.cHeads     = 0;
                pDisk->LCHSGeometry.cSectors   = 0;
            }
            else
            {
                /* Make sure the CHS geometry is properly clipped. */
                pDisk->LCHSGeometry.cHeads   = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
                pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
            }

            /* Image successfully opened, make it the last image. */
            vdAddImageToList(pDisk, pImage);
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
        }
        else
        {
            /* Error detected, image may or may not be opened. Close and delete
             * image if it was opened. */
            if (pImage->pBackendData)
            {
                rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
                AssertRC(rc2);
                pImage->pBackendData = NULL;
            }
        }
    } while (0);

    /* Release whichever lock is still held when we left the block above. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the partially constructed image descriptor. */
    if (RT_FAILURE(rc))
    {
        if (pImage)
        {
            if (pImage->pszFilename)
                RTStrFree(pImage->pszFilename);
            RTMemFree(pImage);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7248
/**
 * Creates and opens a new differencing image file in HDD container.
 * See comments for VDOpen function about differencing images.
 *
 * Requires at least one image to be attached already; the previously last
 * image is switched to read-only mode and the new diff becomes the last image.
 *
 * @returns VBox status code.
 * @param pDisk Pointer to HDD container.
 * @param pszBackend Name of the image file backend to use.
 * @param pszFilename Name of the differencing image file to create.
 * @param uImageFlags Flags specifying special image features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param pParentUuid New parent UUID of the image. If NULL, the UUID is queried automatically.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsImage Pointer to the per-image VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateDiff(PVBOXHDD pDisk, const char *pszBackend,
                               const char *pszFilename, unsigned uImageFlags,
                               const char *pszComment, PCRTUUID pUuid,
                               PCRTUUID pParentUuid, unsigned uOpenFlags,
                               PVDINTERFACE pVDIfsImage,
                               PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false, fLockRead = false; /* Track which lock we hold so the epilogue can release it. */
    PVDIMAGE pImage = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsImage, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* do/while(0) so error paths can 'break' straight to the common cleanup below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        /* The parent UUID may be NULL. */
        AssertMsgBreakStmt(pParentUuid == NULL || VALID_PTR(pParentUuid),
                           ("pParentUuid=%#p ParentUUID=%RTuuid\n", pParentUuid, pParentUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(pDisk->cImages != 0,
                           ("Create diff image cannot be done without other images open\n"),
                           rc = VERR_VD_INVALID_STATE);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /*
         * Destroy the current discard state first which might still have pending blocks
         * for the currently opened image which will be switched to readonly mode.
         */
        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = false;

        /* Set up image descriptor. */
        pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
        if (!pImage)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->pszFilename = RTStrDup(pszFilename);
        if (!pImage->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdFindBackend(pszBackend, &pImage->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pImage->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }
        /* The backend must support both diff images and image creation. */
        if (   !(pImage->Backend->uBackendCaps & VD_CAP_DIFF)
            || !(pImage->Backend->uBackendCaps & (  VD_CAP_CREATE_FIXED
                                                  | VD_CAP_CREATE_DYNAMIC)))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' cannot create diff images"), pszBackend);
            break;
        }

        pImage->VDIo.pDisk  = pDisk;
        pImage->pVDIfsImage = pVDIfsImage;

        /* Set up the I/O interface. Use the caller supplied one if present,
         * otherwise install the built-in fallback callbacks. */
        pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
        if (!pImage->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
            pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
        AssertRC(rc);

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        /* The new image is always a diff image. (The explicit OR in the
         * pfnCreate call below is redundant after this, but harmless.) */
        uImageFlags |= VD_IMAGE_FLAGS_DIFF;
        /* Size and geometry are inherited from the container; progress range
         * 0..99 leaves the final percent for the completion notification. */
        rc = pImage->Backend->pfnCreate(pImage->pszFilename, pDisk->cbSize,
                                        uImageFlags | VD_IMAGE_FLAGS_DIFF,
                                        pszComment, &pDisk->PCHSGeometry,
                                        &pDisk->LCHSGeometry, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pImage->pVDIfsImage,
                                        pVDIfsOperation,
                                        pDisk->enmType,
                                        &pImage->pBackendData);

        if (RT_SUCCESS(rc))
        {
            pImage->VDIo.pBackendData = pImage->pBackendData;
            pImage->uImageFlags       = uImageFlags;

            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            /* Switch previous image to read-only mode. */
            unsigned uOpenFlagsPrevImg;
            uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
            if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
            {
                uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
                rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
            }

            /** @todo optionally check UUIDs */

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(pDisk->cImages != 0,
                          ("Create diff image cannot be done without other images open\n"),
                          rc = VERR_VD_INVALID_STATE);
        }

        if (RT_SUCCESS(rc))
        {
            RTUUID Uuid;
            RTTIMESPEC ts;

            /* Link the new diff to its parent: inherit the parent UUID (either
             * the caller supplied one or the UUID of the previously last image),
             * the parent modification UUID and, if supported, the timestamp. */
            if (pParentUuid && !RTUuidIsNull(pParentUuid))
            {
                Uuid = *pParentUuid;
                pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
            }
            else
            {
                rc2 = pDisk->pLast->Backend->pfnGetUuid(pDisk->pLast->pBackendData,
                                                        &Uuid);
                if (RT_SUCCESS(rc2))
                    pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
            }
            rc2 = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
                                                                &Uuid);
            if (RT_SUCCESS(rc2))
                pImage->Backend->pfnSetParentModificationUuid(pImage->pBackendData,
                                                              &Uuid);
            if (pDisk->pLast->Backend->pfnGetTimestamp)
                rc2 = pDisk->pLast->Backend->pfnGetTimestamp(pDisk->pLast->pBackendData,
                                                             &ts);
            else
                rc2 = VERR_NOT_IMPLEMENTED;
            if (RT_SUCCESS(rc2) && pImage->Backend->pfnSetParentTimestamp)
                pImage->Backend->pfnSetParentTimestamp(pImage->pBackendData, &ts);

            if (pImage->Backend->pfnSetParentFilename)
                rc2 = pImage->Backend->pfnSetParentFilename(pImage->pBackendData, pDisk->pLast->pszFilename);
        }

        if (RT_SUCCESS(rc))
        {
            /* Image successfully opened, make it the last image. */
            vdAddImageToList(pDisk, pImage);
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
        }
        else
        {
            /* Error detected, but image opened. Close and delete image. */
            rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
            AssertRC(rc2);
            pImage->pBackendData = NULL;
        }
    } while (0);

    /* Release whichever lock is still held when we left the block above. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the partially constructed image descriptor. */
    if (RT_FAILURE(rc))
    {
        if (pImage)
        {
            if (pImage->pszFilename)
                RTStrFree(pImage->pszFilename);
            RTMemFree(pImage);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7524
7525
/**
 * Creates and opens new cache image file in HDD container.
 *
 * Only one cache may be attached to a container at a time.
 *
 * @return VBox status code.
 * @param pDisk Pointer to the HDD container.
 * @param pszBackend Name of the cache file backend to use (case insensitive).
 * @param pszFilename Name of the cache file to create.
 * @param cbSize Maximum size of the cache.
 * @param uImageFlags Flags specifying special cache features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsCache Pointer to the per-cache VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateCache(PVBOXHDD pDisk, const char *pszBackend,
                                const char *pszFilename, uint64_t cbSize,
                                unsigned uImageFlags, const char *pszComment,
                                PCRTUUID pUuid, unsigned uOpenFlags,
                                PVDINTERFACE pVDIfsCache, PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false, fLockRead = false; /* Track which lock we hold so the epilogue can release it. */
    PVDCACHE pCache = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsCache, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* do/while(0) so error paths can 'break' straight to the common cleanup below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbSize,
                           ("cbSize=%llu\n", cbSize),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(!pDisk->pCache,
                           ("Create cache image cannot be done with a cache already attached\n"),
                           rc = VERR_VD_CACHE_ALREADY_EXISTS);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /* Set up image descriptor. */
        pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
        if (!pCache)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pCache->pszFilename = RTStrDup(pszFilename);
        if (!pCache->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pCache->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }

        pCache->VDIo.pDisk  = pDisk;
        pCache->pVDIfsCache = pVDIfsCache;

        /* Set up the I/O interface. Use the caller supplied one if present,
         * otherwise install the built-in fallback callbacks. */
        pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
        if (!pCache->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
            pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
        AssertRC(rc);

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        pCache->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        /* Create the cache; progress range 0..99 leaves the final percent for
         * the completion notification at the end of this function. */
        rc = pCache->Backend->pfnCreate(pCache->pszFilename, cbSize,
                                        uImageFlags,
                                        pszComment, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pCache->pVDIfsCache,
                                        pVDIfsOperation,
                                        &pCache->pBackendData);

        if (RT_SUCCESS(rc))
        {
            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            pCache->VDIo.pBackendData = pCache->pBackendData;

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(!pDisk->pCache,
                          ("Create cache image cannot be done with another cache open\n"),
                          rc = VERR_VD_CACHE_ALREADY_EXISTS);
        }

        if (   RT_SUCCESS(rc)
            && pDisk->pLast)
        {
            RTUUID UuidModification;

            /* Set same modification Uuid as the last image. */
            rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
                                                               &UuidModification);
            if (RT_SUCCESS(rc))
            {
                rc = pCache->Backend->pfnSetModificationUuid(pCache->pBackendData,
                                                             &UuidModification);
            }

            /* Modification UUIDs are optional for image/cache backends. */
            if (rc == VERR_NOT_SUPPORTED)
                rc = VINF_SUCCESS;
        }

        if (RT_SUCCESS(rc))
        {
            /* Cache successfully created. */
            pDisk->pCache = pCache;
        }
        else
        {
            /* Error detected, but image opened. Close and delete image. */
            rc2 = pCache->Backend->pfnClose(pCache->pBackendData, true);
            AssertRC(rc2);
            pCache->pBackendData = NULL;
        }
    } while (0);

    /* Release whichever lock is still held when we left the block above. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the partially constructed cache descriptor. */
    if (RT_FAILURE(rc))
    {
        if (pCache)
        {
            if (pCache->pszFilename)
                RTStrFree(pCache->pszFilename);
            RTMemFree(pCache);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7741
7742/**
7743 * Merges two images (not necessarily with direct parent/child relationship).
7744 * As a side effect the source image and potentially the other images which
7745 * are also merged to the destination are deleted from both the disk and the
7746 * images in the HDD container.
7747 *
7748 * @returns VBox status code.
7749 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
7750 * @param pDisk Pointer to HDD container.
 * @param nImageFrom Image number of the source image (the image merged from).
 * @param nImageTo Image number of the destination image (the image merged to).
7753 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
7754 */
7755VBOXDDU_DECL(int) VDMerge(PVBOXHDD pDisk, unsigned nImageFrom,
7756 unsigned nImageTo, PVDINTERFACE pVDIfsOperation)
7757{
7758 int rc = VINF_SUCCESS;
7759 int rc2;
7760 bool fLockWrite = false, fLockRead = false;
7761 void *pvBuf = NULL;
7762
7763 LogFlowFunc(("pDisk=%#p nImageFrom=%u nImageTo=%u pVDIfsOperation=%#p\n",
7764 pDisk, nImageFrom, nImageTo, pVDIfsOperation));
7765
7766 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7767
7768 do
7769 {
7770 /* sanity check */
7771 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7772 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7773
7774 /* For simplicity reasons lock for writing as the image reopen below
7775 * might need it. After all the reopen is usually needed. */
7776 rc2 = vdThreadStartWrite(pDisk);
7777 AssertRC(rc2);
7778 fLockWrite = true;
7779 PVDIMAGE pImageFrom = vdGetImageByNumber(pDisk, nImageFrom);
7780 PVDIMAGE pImageTo = vdGetImageByNumber(pDisk, nImageTo);
7781 if (!pImageFrom || !pImageTo)
7782 {
7783 rc = VERR_VD_IMAGE_NOT_FOUND;
7784 break;
7785 }
7786 AssertBreakStmt(pImageFrom != pImageTo, rc = VERR_INVALID_PARAMETER);
7787
7788 /* Make sure destination image is writable. */
7789 unsigned uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
7790 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7791 {
7792 /*
7793 * Clear skip consistency checks because the image is made writable now and
7794 * skipping consistency checks is only possible for readonly images.
7795 */
7796 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
7797 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
7798 uOpenFlags);
7799 if (RT_FAILURE(rc))
7800 break;
7801 }
7802
7803 /* Get size of destination image. */
7804 uint64_t cbSize = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
7805 rc2 = vdThreadFinishWrite(pDisk);
7806 AssertRC(rc2);
7807 fLockWrite = false;
7808
7809 /* Allocate tmp buffer. */
7810 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
7811 if (!pvBuf)
7812 {
7813 rc = VERR_NO_MEMORY;
7814 break;
7815 }
7816
7817 /* Merging is done directly on the images itself. This potentially
7818 * causes trouble if the disk is full in the middle of operation. */
7819 if (nImageFrom < nImageTo)
7820 {
7821 /* Merge parent state into child. This means writing all not
7822 * allocated blocks in the destination image which are allocated in
7823 * the images to be merged. */
7824 uint64_t uOffset = 0;
7825 uint64_t cbRemaining = cbSize;
7826 do
7827 {
7828 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7829 RTSGSEG SegmentBuf;
7830 RTSGBUF SgBuf;
7831 VDIOCTX IoCtx;
7832
7833 SegmentBuf.pvSeg = pvBuf;
7834 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7835 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7836 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7837 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7838
7839 /* Need to hold the write lock during a read-write operation. */
7840 rc2 = vdThreadStartWrite(pDisk);
7841 AssertRC(rc2);
7842 fLockWrite = true;
7843
7844 rc = pImageTo->Backend->pfnRead(pImageTo->pBackendData,
7845 uOffset, cbThisRead,
7846 &IoCtx, &cbThisRead);
7847 if (rc == VERR_VD_BLOCK_FREE)
7848 {
7849 /* Search for image with allocated block. Do not attempt to
7850 * read more than the previous reads marked as valid.
7851 * Otherwise this would return stale data when different
7852 * block sizes are used for the images. */
7853 for (PVDIMAGE pCurrImage = pImageTo->pPrev;
7854 pCurrImage != NULL && pCurrImage != pImageFrom->pPrev && rc == VERR_VD_BLOCK_FREE;
7855 pCurrImage = pCurrImage->pPrev)
7856 {
7857 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7858 uOffset, cbThisRead,
7859 &IoCtx, &cbThisRead);
7860 }
7861
7862 if (rc != VERR_VD_BLOCK_FREE)
7863 {
7864 if (RT_FAILURE(rc))
7865 break;
7866 /* Updating the cache is required because this might be a live merge. */
7867 rc = vdWriteHelperEx(pDisk, pImageTo, pImageFrom->pPrev,
7868 uOffset, pvBuf, cbThisRead,
7869 VDIOCTX_FLAGS_READ_UPDATE_CACHE, 0);
7870 if (RT_FAILURE(rc))
7871 break;
7872 }
7873 else
7874 rc = VINF_SUCCESS;
7875 }
7876 else if (RT_FAILURE(rc))
7877 break;
7878
7879 rc2 = vdThreadFinishWrite(pDisk);
7880 AssertRC(rc2);
7881 fLockWrite = false;
7882
7883 uOffset += cbThisRead;
7884 cbRemaining -= cbThisRead;
7885
7886 if (pIfProgress && pIfProgress->pfnProgress)
7887 {
7888 /** @todo r=klaus: this can update the progress to the same
7889 * percentage over and over again if the image format makes
7890 * relatively small increments. */
7891 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
7892 uOffset * 99 / cbSize);
7893 if (RT_FAILURE(rc))
7894 break;
7895 }
7896 } while (uOffset < cbSize);
7897 }
7898 else
7899 {
7900 /*
7901 * We may need to update the parent uuid of the child coming after
7902 * the last image to be merged. We have to reopen it read/write.
7903 *
7904 * This is done before we do the actual merge to prevent an
7905 * inconsistent chain if the mode change fails for some reason.
7906 */
7907 if (pImageFrom->pNext)
7908 {
7909 PVDIMAGE pImageChild = pImageFrom->pNext;
7910
7911 /* Take the write lock. */
7912 rc2 = vdThreadStartWrite(pDisk);
7913 AssertRC(rc2);
7914 fLockWrite = true;
7915
7916 /* We need to open the image in read/write mode. */
7917 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
7918
7919 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7920 {
7921 uOpenFlags &= ~VD_OPEN_FLAGS_READONLY;
7922 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
7923 uOpenFlags);
7924 if (RT_FAILURE(rc))
7925 break;
7926 }
7927
7928 rc2 = vdThreadFinishWrite(pDisk);
7929 AssertRC(rc2);
7930 fLockWrite = false;
7931 }
7932
7933 /* If the merge is from the last image we have to relay all writes
7934 * to the merge destination as well, so that concurrent writes
7935 * (in case of a live merge) are handled correctly. */
7936 if (!pImageFrom->pNext)
7937 {
7938 /* Take the write lock. */
7939 rc2 = vdThreadStartWrite(pDisk);
7940 AssertRC(rc2);
7941 fLockWrite = true;
7942
7943 pDisk->pImageRelay = pImageTo;
7944
7945 rc2 = vdThreadFinishWrite(pDisk);
7946 AssertRC(rc2);
7947 fLockWrite = false;
7948 }
7949
7950 /* Merge child state into parent. This means writing all blocks
7951 * which are allocated in the image up to the source image to the
7952 * destination image. */
7953 uint64_t uOffset = 0;
7954 uint64_t cbRemaining = cbSize;
7955 do
7956 {
7957 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7958 RTSGSEG SegmentBuf;
7959 RTSGBUF SgBuf;
7960 VDIOCTX IoCtx;
7961
7962 rc = VERR_VD_BLOCK_FREE;
7963
7964 SegmentBuf.pvSeg = pvBuf;
7965 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7966 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7967 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7968 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7969
7970 /* Need to hold the write lock during a read-write operation. */
7971 rc2 = vdThreadStartWrite(pDisk);
7972 AssertRC(rc2);
7973 fLockWrite = true;
7974
7975 /* Search for image with allocated block. Do not attempt to
7976 * read more than the previous reads marked as valid. Otherwise
7977 * this would return stale data when different block sizes are
7978 * used for the images. */
7979 for (PVDIMAGE pCurrImage = pImageFrom;
7980 pCurrImage != NULL && pCurrImage != pImageTo && rc == VERR_VD_BLOCK_FREE;
7981 pCurrImage = pCurrImage->pPrev)
7982 {
7983 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7984 uOffset, cbThisRead,
7985 &IoCtx, &cbThisRead);
7986 }
7987
7988 if (rc != VERR_VD_BLOCK_FREE)
7989 {
7990 if (RT_FAILURE(rc))
7991 break;
7992 rc = vdWriteHelper(pDisk, pImageTo, uOffset, pvBuf,
7993 cbThisRead, VDIOCTX_FLAGS_READ_UPDATE_CACHE);
7994 if (RT_FAILURE(rc))
7995 break;
7996 }
7997 else
7998 rc = VINF_SUCCESS;
7999
8000 rc2 = vdThreadFinishWrite(pDisk);
8001 AssertRC(rc2);
8002 fLockWrite = false;
8003
8004 uOffset += cbThisRead;
8005 cbRemaining -= cbThisRead;
8006
8007 if (pIfProgress && pIfProgress->pfnProgress)
8008 {
8009 /** @todo r=klaus: this can update the progress to the same
8010 * percentage over and over again if the image format makes
8011 * relatively small increments. */
8012 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8013 uOffset * 99 / cbSize);
8014 if (RT_FAILURE(rc))
8015 break;
8016 }
8017 } while (uOffset < cbSize);
8018
8019 /* In case we set up a "write proxy" image above we must clear
8020 * this again now to prevent stray writes. Failure or not. */
8021 if (!pImageFrom->pNext)
8022 {
8023 /* Take the write lock. */
8024 rc2 = vdThreadStartWrite(pDisk);
8025 AssertRC(rc2);
8026 fLockWrite = true;
8027
8028 pDisk->pImageRelay = NULL;
8029
8030 rc2 = vdThreadFinishWrite(pDisk);
8031 AssertRC(rc2);
8032 fLockWrite = false;
8033 }
8034 }
8035
8036 /*
8037 * Leave in case of an error to avoid corrupted data in the image chain
8038 * (includes cancelling the operation by the user).
8039 */
8040 if (RT_FAILURE(rc))
8041 break;
8042
8043 /* Need to hold the write lock while finishing the merge. */
8044 rc2 = vdThreadStartWrite(pDisk);
8045 AssertRC(rc2);
8046 fLockWrite = true;
8047
8048 /* Update parent UUID so that image chain is consistent.
8049 * The two attempts work around the problem that some backends
8050 * (e.g. iSCSI) do not support UUIDs, so we exploit the fact that
8051 * so far there can only be one such image in the chain. */
8052 /** @todo needs a better long-term solution, passing the UUID
8053 * knowledge from the caller or some such */
8054 RTUUID Uuid;
8055 PVDIMAGE pImageChild = NULL;
8056 if (nImageFrom < nImageTo)
8057 {
8058 if (pImageFrom->pPrev)
8059 {
8060 /* plan A: ask the parent itself for its UUID */
8061 rc = pImageFrom->pPrev->Backend->pfnGetUuid(pImageFrom->pPrev->pBackendData,
8062 &Uuid);
8063 if (RT_FAILURE(rc))
8064 {
8065 /* plan B: ask the child of the parent for parent UUID */
8066 rc = pImageFrom->Backend->pfnGetParentUuid(pImageFrom->pBackendData,
8067 &Uuid);
8068 }
8069 AssertRC(rc);
8070 }
8071 else
8072 RTUuidClear(&Uuid);
8073 rc = pImageTo->Backend->pfnSetParentUuid(pImageTo->pBackendData,
8074 &Uuid);
8075 AssertRC(rc);
8076 }
8077 else
8078 {
8079 /* Update the parent uuid of the child of the last merged image. */
8080 if (pImageFrom->pNext)
8081 {
8082 /* plan A: ask the parent itself for its UUID */
8083 rc = pImageTo->Backend->pfnGetUuid(pImageTo->pBackendData,
8084 &Uuid);
8085 if (RT_FAILURE(rc))
8086 {
8087 /* plan B: ask the child of the parent for parent UUID */
8088 rc = pImageTo->pNext->Backend->pfnGetParentUuid(pImageTo->pNext->pBackendData,
8089 &Uuid);
8090 }
8091 AssertRC(rc);
8092
8093 rc = pImageFrom->Backend->pfnSetParentUuid(pImageFrom->pNext->pBackendData,
8094 &Uuid);
8095 AssertRC(rc);
8096
8097 pImageChild = pImageFrom->pNext;
8098 }
8099 }
8100
8101 /* Delete the no longer needed images. */
8102 PVDIMAGE pImg = pImageFrom, pTmp;
8103 while (pImg != pImageTo)
8104 {
8105 if (nImageFrom < nImageTo)
8106 pTmp = pImg->pNext;
8107 else
8108 pTmp = pImg->pPrev;
8109 vdRemoveImageFromList(pDisk, pImg);
8110 pImg->Backend->pfnClose(pImg->pBackendData, true);
8111 RTMemFree(pImg->pszFilename);
8112 RTMemFree(pImg);
8113 pImg = pTmp;
8114 }
8115
8116 /* Make sure destination image is back to read only if necessary. */
8117 if (pImageTo != pDisk->pLast)
8118 {
8119 uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
8120 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8121 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8122 uOpenFlags);
8123 if (RT_FAILURE(rc))
8124 break;
8125 }
8126
8127 /*
8128 * Make sure the child is readonly
8129 * for the child -> parent merge direction
8130 * if necessary.
8131 */
8132 if ( nImageFrom > nImageTo
8133 && pImageChild
8134 && pImageChild != pDisk->pLast)
8135 {
8136 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
8137 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8138 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
8139 uOpenFlags);
8140 if (RT_FAILURE(rc))
8141 break;
8142 }
8143 } while (0);
8144
8145 if (RT_UNLIKELY(fLockWrite))
8146 {
8147 rc2 = vdThreadFinishWrite(pDisk);
8148 AssertRC(rc2);
8149 }
8150 else if (RT_UNLIKELY(fLockRead))
8151 {
8152 rc2 = vdThreadFinishRead(pDisk);
8153 AssertRC(rc2);
8154 }
8155
8156 if (pvBuf)
8157 RTMemTmpFree(pvBuf);
8158
8159 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
8160 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8161
8162 LogFlowFunc(("returns %Rrc\n", rc));
8163 return rc;
8164}
8165
8166/**
8167 * Copies an image from one HDD container to another - extended version.
8168 * The copy is opened in the target HDD container.
8169 * It is possible to convert between different image formats, because the
8170 * backend for the destination may be different from the source.
8171 * If both the source and destination reference the same HDD container,
8172 * then the image is moved (by copying/deleting or renaming) to the new location.
8173 * The source container is unchanged if the move operation fails, otherwise
8174 * the image at the new location is opened in the same way as the old one was.
8175 *
8176 * @note The read/write accesses across disks are not synchronized, just the
8177 * accesses to each disk. Once there is a use case which requires a defined
8178 * read/write behavior in this situation this needs to be extended.
8179 *
8180 * @returns VBox status code.
8181 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8182 * @param pDiskFrom Pointer to source HDD container.
8183 * @param nImage Image number, counts from 0. 0 is always base image of container.
8184 * @param pDiskTo Pointer to destination HDD container.
8185 * @param pszBackend Name of the image file backend to use (may be NULL to use the same as the source, case insensitive).
8186 * @param pszFilename New name of the image (may be NULL to specify that the
8187 * copy destination is the destination container, or
8188 * if pDiskFrom == pDiskTo, i.e. when moving).
8189 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8190 * @param cbSize New image size (0 means leave unchanged).
 * @param nImageFromSame Number of the image in the source chain whose content is
 *                       known to be the same as image nImageToSame in the destination
 *                       chain, or VD_IMAGE_CONTENT_UNKNOWN if no such relation is
 *                       known (disables the optimized read-back copy path).
 * @param nImageToSame   Number of the image in the destination chain whose content is
 *                       known to be the same as image nImageFromSame in the source
 *                       chain, or VD_IMAGE_CONTENT_UNKNOWN if no such relation is
 *                       known. Must be VD_IMAGE_CONTENT_UNKNOWN iff nImageFromSame is.
8193 * @param uImageFlags Flags specifying special destination image features.
8194 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8195 * This parameter is used if and only if a true copy is created.
8196 * In all rename/move cases or copy to existing image cases the modification UUIDs are copied over.
8197 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8198 * Only used if the destination image is created.
8199 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8200 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8201 * destination image.
8202 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
8203 * for the destination operation.
8204 */
VBOXDDU_DECL(int) VDCopyEx(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
                           const char *pszBackend, const char *pszFilename,
                           bool fMoveByRename, uint64_t cbSize,
                           unsigned nImageFromSame, unsigned nImageToSame,
                           unsigned uImageFlags, PCRTUUID pDstUuid,
                           unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
                           PVDINTERFACE pDstVDIfsImage,
                           PVDINTERFACE pDstVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    /* Track which of the three possible locks are currently held so the
     * cleanup code after the do/while(0) can release exactly what was taken. */
    bool fLockReadFrom = false, fLockWriteFrom = false, fLockWriteTo = false;
    /* Set as soon as a destination image exists; used for error cleanup. */
    PVDIMAGE pImageTo = NULL;

    LogFlowFunc(("pDiskFrom=%#p nImage=%u pDiskTo=%#p pszBackend=\"%s\" pszFilename=\"%s\" fMoveByRename=%d cbSize=%llu nImageFromSame=%u nImageToSame=%u uImageFlags=%#x pDstUuid=%#p uOpenFlags=%#x pVDIfsOperation=%#p pDstVDIfsImage=%#p pDstVDIfsOperation=%#p\n",
                 pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename, cbSize, nImageFromSame, nImageToSame, uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation, pDstVDIfsImage, pDstVDIfsOperation));

    /* Optional progress reporting interfaces for the source and the
     * destination side of the operation. */
    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
    PVDINTERFACEPROGRESS pDstIfProgress = VDIfProgressGet(pDstVDIfsOperation);

    do {
        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pDiskFrom), ("pDiskFrom=%#p\n", pDiskFrom),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDiskFrom->u32Signature == VBOXHDDDISK_SIGNATURE,
                  ("u32Signature=%08x\n", pDiskFrom->u32Signature));

        rc2 = vdThreadStartRead(pDiskFrom);
        AssertRC(rc2);
        fLockReadFrom = true;
        PVDIMAGE pImageFrom = vdGetImageByNumber(pDiskFrom, nImage);
        AssertPtrBreakStmt(pImageFrom, rc = VERR_VD_IMAGE_NOT_FOUND);
        AssertMsgBreakStmt(VALID_PTR(pDiskTo), ("pDiskTo=%#p\n", pDiskTo),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDiskTo->u32Signature == VBOXHDDDISK_SIGNATURE,
                  ("u32Signature=%08x\n", pDiskTo->u32Signature));
        /* The "same content" image numbers must either both be unknown or
         * both refer to valid images below the respective chain tops. */
        AssertMsgBreakStmt(   (nImageFromSame < nImage || nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
                           && (nImageToSame < pDiskTo->cImages || nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
                           && (   (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN && nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
                               || (nImageFromSame != VD_IMAGE_CONTENT_UNKNOWN && nImageToSame != VD_IMAGE_CONTENT_UNKNOWN)),
                           ("nImageFromSame=%u nImageToSame=%u\n", nImageFromSame, nImageToSame),
                           rc = VERR_INVALID_PARAMETER);

        /* Move the image. */
        if (pDiskFrom == pDiskTo)
        {
            /* Rename only works when backends are the same, are file based
             * and the rename method is implemented. */
            if (    fMoveByRename
                &&  !RTStrICmp(pszBackend, pImageFrom->Backend->pszBackendName)
                &&  pImageFrom->Backend->uBackendCaps & VD_CAP_FILE
                &&  pImageFrom->Backend->pfnRename)
            {
                rc2 = vdThreadFinishRead(pDiskFrom);
                AssertRC(rc2);
                fLockReadFrom = false;

                rc2 = vdThreadStartWrite(pDiskFrom);
                AssertRC(rc2);
                fLockWriteFrom = true;
                rc = pImageFrom->Backend->pfnRename(pImageFrom->pBackendData, pszFilename ? pszFilename : pImageFrom->pszFilename);
                break;
            }

            /** @todo Moving (including shrinking/growing) of the image is
             * requested, but the rename attempt failed or it wasn't possible.
             * Must now copy image to temp location. */
            AssertReleaseMsgFailed(("VDCopy: moving by copy/delete not implemented\n"));
        }

        /* pszFilename is allowed to be NULL, as this indicates copy to the existing image. */
        AssertMsgBreakStmt(pszFilename == NULL || (VALID_PTR(pszFilename) && *pszFilename),
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);

        uint64_t cbSizeFrom;
        cbSizeFrom = pImageFrom->Backend->pfnGetSize(pImageFrom->pBackendData);
        if (cbSizeFrom == 0)
        {
            rc = VERR_VD_VALUE_NOT_FOUND;
            break;
        }

        /* Capture the source geometry so the destination can inherit it. */
        VDGEOMETRY PCHSGeometryFrom = {0, 0, 0};
        VDGEOMETRY LCHSGeometryFrom = {0, 0, 0};
        pImageFrom->Backend->pfnGetPCHSGeometry(pImageFrom->pBackendData, &PCHSGeometryFrom);
        pImageFrom->Backend->pfnGetLCHSGeometry(pImageFrom->pBackendData, &LCHSGeometryFrom);

        /* Determine the UUID of the destination: a true cross-container copy
         * gets the caller-supplied UUID (or a fresh one); when source and
         * destination are the same container the source UUID is kept. */
        RTUUID ImageUuid, ImageModificationUuid;
        if (pDiskFrom != pDiskTo)
        {
            if (pDstUuid)
                ImageUuid = *pDstUuid;
            else
                RTUuidCreate(&ImageUuid);
        }
        else
        {
            rc = pImageFrom->Backend->pfnGetUuid(pImageFrom->pBackendData, &ImageUuid);
            if (RT_FAILURE(rc))
                RTUuidCreate(&ImageUuid);
        }
        rc = pImageFrom->Backend->pfnGetModificationUuid(pImageFrom->pBackendData, &ImageModificationUuid);
        if (RT_FAILURE(rc))
            RTUuidClear(&ImageModificationUuid);

        /* Preserve the source image comment (best effort, failure tolerated). */
        char szComment[1024];
        rc = pImageFrom->Backend->pfnGetComment(pImageFrom->pBackendData, szComment, sizeof(szComment));
        if (RT_FAILURE(rc))
            szComment[0] = '\0';
        else
            szComment[sizeof(szComment) - 1] = '\0';

        rc2 = vdThreadFinishRead(pDiskFrom);
        AssertRC(rc2);
        fLockReadFrom = false;

        /* Only need the destination image count; drop the lock right away. */
        rc2 = vdThreadStartRead(pDiskTo);
        AssertRC(rc2);
        unsigned cImagesTo = pDiskTo->cImages;
        rc2 = vdThreadFinishRead(pDiskTo);
        AssertRC(rc2);

        if (pszFilename)
        {
            if (cbSize == 0)
                cbSize = cbSizeFrom;

            /* Create destination image with the properties of source image. */
            /** @todo replace the VDCreateDiff/VDCreateBase calls by direct
             * calls to the backend. Unifies the code and reduces the API
             * dependencies. Would also make the synchronization explicit. */
            if (cImagesTo > 0)
            {
                rc = VDCreateDiff(pDiskTo, pszBackend, pszFilename,
                                  uImageFlags, szComment, &ImageUuid,
                                  NULL /* pParentUuid */,
                                  uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
                                  pDstVDIfsImage, NULL);

                rc2 = vdThreadStartWrite(pDiskTo);
                AssertRC(rc2);
                fLockWriteTo = true;
            } else {
                /** @todo hack to force creation of a fixed image for
                 * the RAW backend, which can't handle anything else. */
                if (!RTStrICmp(pszBackend, "RAW"))
                    uImageFlags |= VD_IMAGE_FLAGS_FIXED;

                vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
                vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);

                rc = VDCreateBase(pDiskTo, pszBackend, pszFilename, cbSize,
                                  uImageFlags, szComment,
                                  &PCHSGeometryFrom, &LCHSGeometryFrom,
                                  NULL, uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
                                  pDstVDIfsImage, NULL);

                rc2 = vdThreadStartWrite(pDiskTo);
                AssertRC(rc2);
                fLockWriteTo = true;

                if (RT_SUCCESS(rc) && !RTUuidIsNull(&ImageUuid))
                    pDiskTo->pLast->Backend->pfnSetUuid(pDiskTo->pLast->pBackendData, &ImageUuid);
            }
            if (RT_FAILURE(rc))
                break;

            pImageTo = pDiskTo->pLast;
            AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);

            /* Never copy more data than the source image contains. */
            cbSize = RT_MIN(cbSize, cbSizeFrom);
        }
        else
        {
            /* Copy into an already opened destination image. */
            pImageTo = pDiskTo->pLast;
            AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);

            uint64_t cbSizeTo;
            cbSizeTo = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
            if (cbSizeTo == 0)
            {
                rc = VERR_VD_VALUE_NOT_FOUND;
                break;
            }

            if (cbSize == 0)
                cbSize = RT_MIN(cbSizeFrom, cbSizeTo);

            vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
            vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);

            /* Update the geometry in the destination image. */
            pImageTo->Backend->pfnSetPCHSGeometry(pImageTo->pBackendData, &PCHSGeometryFrom);
            pImageTo->Backend->pfnSetLCHSGeometry(pImageTo->pBackendData, &LCHSGeometryFrom);
        }

        rc2 = vdThreadFinishWrite(pDiskTo);
        AssertRC(rc2);
        fLockWriteTo = false;

        /* Whether we can take the optimized copy path (false) or not.
         * Don't optimize if the image existed or if it is a child image. */
        bool fSuppressRedundantIo = (   !(pszFilename == NULL || cImagesTo > 0)
                                     || (nImageToSame != VD_IMAGE_CONTENT_UNKNOWN));
        /* Number of images above the known-identical image which must be read
         * back to obtain the current state on each side (0 when the contents
         * of the two chains are unrelated). */
        unsigned cImagesFromReadBack, cImagesToReadBack;

        if (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
            cImagesFromReadBack = 0;
        else
        {
            if (nImage == VD_LAST_IMAGE)
                cImagesFromReadBack = pDiskFrom->cImages - nImageFromSame - 1;
            else
                cImagesFromReadBack = nImage - nImageFromSame;
        }

        if (nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
            cImagesToReadBack = 0;
        else
            cImagesToReadBack = pDiskTo->cImages - nImageToSame - 1;

        /* Copy the data. */
        rc = vdCopyHelper(pDiskFrom, pImageFrom, pDiskTo, cbSize,
                          cImagesFromReadBack, cImagesToReadBack,
                          fSuppressRedundantIo, pIfProgress, pDstIfProgress);

        if (RT_SUCCESS(rc))
        {
            rc2 = vdThreadStartWrite(pDiskTo);
            AssertRC(rc2);
            fLockWriteTo = true;

            /* Only set modification UUID if it is non-null, since the source
             * backend might not provide a valid modification UUID. */
            if (!RTUuidIsNull(&ImageModificationUuid))
                pImageTo->Backend->pfnSetModificationUuid(pImageTo->pBackendData, &ImageModificationUuid);

            /* Set the requested open flags if they differ from the value
             * required for creating the image and copying the contents. */
            if (   pImageTo && pszFilename
                && uOpenFlags != (uOpenFlags & ~VD_OPEN_FLAGS_READONLY))
                rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
                                                        uOpenFlags);
        }
    } while (0);

    /* On failure a freshly created destination image must be removed again so
     * no half-written image stays behind in the destination container. */
    if (RT_FAILURE(rc) && pImageTo && pszFilename)
    {
        /* Take the write lock only if it is not taken. Not worth making the
         * above code even more complicated. */
        if (RT_UNLIKELY(!fLockWriteTo))
        {
            rc2 = vdThreadStartWrite(pDiskTo);
            AssertRC(rc2);
            fLockWriteTo = true;
        }
        /* Error detected, but new image created. Remove image from list. */
        vdRemoveImageFromList(pDiskTo, pImageTo);

        /* Close and delete image. */
        rc2 = pImageTo->Backend->pfnClose(pImageTo->pBackendData, true);
        AssertRC(rc2);
        pImageTo->pBackendData = NULL;

        /* Free remaining resources. */
        if (pImageTo->pszFilename)
            RTStrFree(pImageTo->pszFilename);

        RTMemFree(pImageTo);
    }

    /* Release whichever locks are still held. */
    if (RT_UNLIKELY(fLockWriteTo))
    {
        rc2 = vdThreadFinishWrite(pDiskTo);
        AssertRC(rc2);
    }
    if (RT_UNLIKELY(fLockWriteFrom))
    {
        rc2 = vdThreadFinishWrite(pDiskFrom);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockReadFrom))
    {
        rc2 = vdThreadFinishRead(pDiskFrom);
        AssertRC(rc2);
    }

    if (RT_SUCCESS(rc))
    {
        if (pIfProgress && pIfProgress->pfnProgress)
            pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
        if (pDstIfProgress && pDstIfProgress->pfnProgress)
            pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser, 100);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
8504
8505/**
8506 * Copies an image from one HDD container to another.
8507 * The copy is opened in the target HDD container.
8508 * It is possible to convert between different image formats, because the
8509 * backend for the destination may be different from the source.
8510 * If both the source and destination reference the same HDD container,
8511 * then the image is moved (by copying/deleting or renaming) to the new location.
8512 * The source container is unchanged if the move operation fails, otherwise
8513 * the image at the new location is opened in the same way as the old one was.
8514 *
8515 * @returns VBox status code.
8516 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8517 * @param pDiskFrom Pointer to source HDD container.
8518 * @param nImage Image number, counts from 0. 0 is always base image of container.
8519 * @param pDiskTo Pointer to destination HDD container.
8520 * @param pszBackend Name of the image file backend to use.
8521 * @param pszFilename New name of the image (may be NULL if pDiskFrom == pDiskTo).
8522 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8523 * @param cbSize New image size (0 means leave unchanged).
8524 * @param uImageFlags Flags specifying special destination image features.
8525 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8526 * This parameter is used if and only if a true copy is created.
8527 * In all rename/move cases the UUIDs are copied over.
8528 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8529 * Only used if the destination image is created.
8530 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8531 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8532 * destination image.
 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
 *                           for the destination operation.
8535 */
8536VBOXDDU_DECL(int) VDCopy(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8537 const char *pszBackend, const char *pszFilename,
8538 bool fMoveByRename, uint64_t cbSize,
8539 unsigned uImageFlags, PCRTUUID pDstUuid,
8540 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8541 PVDINTERFACE pDstVDIfsImage,
8542 PVDINTERFACE pDstVDIfsOperation)
8543{
8544 return VDCopyEx(pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename,
8545 cbSize, VD_IMAGE_CONTENT_UNKNOWN, VD_IMAGE_CONTENT_UNKNOWN,
8546 uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation,
8547 pDstVDIfsImage, pDstVDIfsOperation);
8548}
8549
8550/**
8551 * Optimizes the storage consumption of an image. Typically the unused blocks
8552 * have to be wiped with zeroes to achieve a substantial reduced storage use.
8553 * Another optimization done is reordering the image blocks, which can provide
8554 * a significant performance boost, as reads and writes tend to use less random
8555 * file offsets.
8556 *
8557 * @return VBox status code.
8558 * @return VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8559 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
8560 * @return VERR_NOT_SUPPORTED if this kind of image can be compacted, but
8561 * the code for this isn't implemented yet.
8562 * @param pDisk Pointer to HDD container.
8563 * @param nImage Image number, counts from 0. 0 is always base image of container.
8564 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8565 */
8566VBOXDDU_DECL(int) VDCompact(PVBOXHDD pDisk, unsigned nImage,
8567 PVDINTERFACE pVDIfsOperation)
8568{
8569 int rc = VINF_SUCCESS;
8570 int rc2;
8571 bool fLockRead = false, fLockWrite = false;
8572 void *pvBuf = NULL;
8573 void *pvTmp = NULL;
8574
8575 LogFlowFunc(("pDisk=%#p nImage=%u pVDIfsOperation=%#p\n",
8576 pDisk, nImage, pVDIfsOperation));
8577
8578 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8579
8580 do {
8581 /* Check arguments. */
8582 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8583 rc = VERR_INVALID_PARAMETER);
8584 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8585 ("u32Signature=%08x\n", pDisk->u32Signature));
8586
8587 rc2 = vdThreadStartRead(pDisk);
8588 AssertRC(rc2);
8589 fLockRead = true;
8590
8591 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
8592 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
8593
8594 /* If there is no compact callback for not file based backends then
8595 * the backend doesn't need compaction. No need to make much fuss about
8596 * this. For file based ones signal this as not yet supported. */
8597 if (!pImage->Backend->pfnCompact)
8598 {
8599 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8600 rc = VERR_NOT_SUPPORTED;
8601 else
8602 rc = VINF_SUCCESS;
8603 break;
8604 }
8605
8606 /* Insert interface for reading parent state into per-operation list,
8607 * if there is a parent image. */
8608 VDINTERFACEPARENTSTATE VDIfParent;
8609 VDPARENTSTATEDESC ParentUser;
8610 if (pImage->pPrev)
8611 {
8612 VDIfParent.pfnParentRead = vdParentRead;
8613 ParentUser.pDisk = pDisk;
8614 ParentUser.pImage = pImage->pPrev;
8615 rc = VDInterfaceAdd(&VDIfParent.Core, "VDCompact_ParentState", VDINTERFACETYPE_PARENTSTATE,
8616 &ParentUser, sizeof(VDINTERFACEPARENTSTATE), &pVDIfsOperation);
8617 AssertRC(rc);
8618 }
8619
8620 rc2 = vdThreadFinishRead(pDisk);
8621 AssertRC(rc2);
8622 fLockRead = false;
8623
8624 rc2 = vdThreadStartWrite(pDisk);
8625 AssertRC(rc2);
8626 fLockWrite = true;
8627
8628 rc = pImage->Backend->pfnCompact(pImage->pBackendData,
8629 0, 99,
8630 pDisk->pVDIfsDisk,
8631 pImage->pVDIfsImage,
8632 pVDIfsOperation);
8633 } while (0);
8634
8635 if (RT_UNLIKELY(fLockWrite))
8636 {
8637 rc2 = vdThreadFinishWrite(pDisk);
8638 AssertRC(rc2);
8639 }
8640 else if (RT_UNLIKELY(fLockRead))
8641 {
8642 rc2 = vdThreadFinishRead(pDisk);
8643 AssertRC(rc2);
8644 }
8645
8646 if (pvBuf)
8647 RTMemTmpFree(pvBuf);
8648 if (pvTmp)
8649 RTMemTmpFree(pvTmp);
8650
8651 if (RT_SUCCESS(rc))
8652 {
8653 if (pIfProgress && pIfProgress->pfnProgress)
8654 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8655 }
8656
8657 LogFlowFunc(("returns %Rrc\n", rc));
8658 return rc;
8659}
8660
8661/**
8662 * Resizes the given disk image to the given size.
8663 *
8664 * @return VBox status
8665 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
 * @return VERR_NOT_SUPPORTED if the image backend does not support resizing.
8667 *
8668 * @param pDisk Pointer to the HDD container.
8669 * @param cbSize New size of the image.
8670 * @param pPCHSGeometry Pointer to the new physical disk geometry <= (16383,16,63). Not NULL.
8671 * @param pLCHSGeometry Pointer to the new logical disk geometry <= (x,255,63). Not NULL.
8672 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8673 */
8674VBOXDDU_DECL(int) VDResize(PVBOXHDD pDisk, uint64_t cbSize,
8675 PCVDGEOMETRY pPCHSGeometry,
8676 PCVDGEOMETRY pLCHSGeometry,
8677 PVDINTERFACE pVDIfsOperation)
8678{
8679 /** @todo r=klaus resizing was designed to be part of VDCopy, so having a separate function is not desirable. */
8680 int rc = VINF_SUCCESS;
8681 int rc2;
8682 bool fLockRead = false, fLockWrite = false;
8683
8684 LogFlowFunc(("pDisk=%#p cbSize=%llu pVDIfsOperation=%#p\n",
8685 pDisk, cbSize, pVDIfsOperation));
8686
8687 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8688
8689 do {
8690 /* Check arguments. */
8691 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8692 rc = VERR_INVALID_PARAMETER);
8693 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8694 ("u32Signature=%08x\n", pDisk->u32Signature));
8695
8696 rc2 = vdThreadStartRead(pDisk);
8697 AssertRC(rc2);
8698 fLockRead = true;
8699
8700 /* Must have at least one image in the chain, will resize last. */
8701 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8702 rc = VERR_NOT_SUPPORTED);
8703
8704 PVDIMAGE pImage = pDisk->pLast;
8705
8706 /* If there is no compact callback for not file based backends then
8707 * the backend doesn't need compaction. No need to make much fuss about
8708 * this. For file based ones signal this as not yet supported. */
8709 if (!pImage->Backend->pfnResize)
8710 {
8711 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8712 rc = VERR_NOT_SUPPORTED;
8713 else
8714 rc = VINF_SUCCESS;
8715 break;
8716 }
8717
8718 rc2 = vdThreadFinishRead(pDisk);
8719 AssertRC(rc2);
8720 fLockRead = false;
8721
8722 rc2 = vdThreadStartWrite(pDisk);
8723 AssertRC(rc2);
8724 fLockWrite = true;
8725
8726 VDGEOMETRY PCHSGeometryOld;
8727 VDGEOMETRY LCHSGeometryOld;
8728 PCVDGEOMETRY pPCHSGeometryNew;
8729 PCVDGEOMETRY pLCHSGeometryNew;
8730
8731 if (pPCHSGeometry->cCylinders == 0)
8732 {
8733 /* Auto-detect marker, calculate new value ourself. */
8734 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData, &PCHSGeometryOld);
8735 if (RT_SUCCESS(rc) && (PCHSGeometryOld.cCylinders != 0))
8736 PCHSGeometryOld.cCylinders = RT_MIN(cbSize / 512 / PCHSGeometryOld.cHeads / PCHSGeometryOld.cSectors, 16383);
8737 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8738 rc = VINF_SUCCESS;
8739
8740 pPCHSGeometryNew = &PCHSGeometryOld;
8741 }
8742 else
8743 pPCHSGeometryNew = pPCHSGeometry;
8744
8745 if (pLCHSGeometry->cCylinders == 0)
8746 {
8747 /* Auto-detect marker, calculate new value ourself. */
8748 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData, &LCHSGeometryOld);
8749 if (RT_SUCCESS(rc) && (LCHSGeometryOld.cCylinders != 0))
8750 LCHSGeometryOld.cCylinders = cbSize / 512 / LCHSGeometryOld.cHeads / LCHSGeometryOld.cSectors;
8751 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8752 rc = VINF_SUCCESS;
8753
8754 pLCHSGeometryNew = &LCHSGeometryOld;
8755 }
8756 else
8757 pLCHSGeometryNew = pLCHSGeometry;
8758
8759 if (RT_SUCCESS(rc))
8760 rc = pImage->Backend->pfnResize(pImage->pBackendData,
8761 cbSize,
8762 pPCHSGeometryNew,
8763 pLCHSGeometryNew,
8764 0, 99,
8765 pDisk->pVDIfsDisk,
8766 pImage->pVDIfsImage,
8767 pVDIfsOperation);
8768 } while (0);
8769
8770 if (RT_UNLIKELY(fLockWrite))
8771 {
8772 rc2 = vdThreadFinishWrite(pDisk);
8773 AssertRC(rc2);
8774 }
8775 else if (RT_UNLIKELY(fLockRead))
8776 {
8777 rc2 = vdThreadFinishRead(pDisk);
8778 AssertRC(rc2);
8779 }
8780
8781 if (RT_SUCCESS(rc))
8782 {
8783 if (pIfProgress && pIfProgress->pfnProgress)
8784 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8785
8786 pDisk->cbSize = cbSize;
8787 }
8788
8789 LogFlowFunc(("returns %Rrc\n", rc));
8790 return rc;
8791}
8792
8793VBOXDDU_DECL(int) VDPrepareWithFilters(PVBOXHDD pDisk, PVDINTERFACE pVDIfsOperation)
8794{
8795 int rc = VINF_SUCCESS;
8796 int rc2;
8797 bool fLockRead = false, fLockWrite = false;
8798
8799 LogFlowFunc(("pDisk=%#p pVDIfsOperation=%#p\n", pDisk, pVDIfsOperation));
8800
8801 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8802
8803 do {
8804 /* Check arguments. */
8805 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8806 rc = VERR_INVALID_PARAMETER);
8807 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8808 ("u32Signature=%08x\n", pDisk->u32Signature));
8809
8810 rc2 = vdThreadStartRead(pDisk);
8811 AssertRC(rc2);
8812 fLockRead = true;
8813
8814 /* Must have at least one image in the chain. */
8815 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8816 rc = VERR_VD_NOT_OPENED);
8817
8818 unsigned uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
8819 AssertMsgBreakStmt(!(uOpenFlags & VD_OPEN_FLAGS_READONLY),
8820 ("Last image should be read write"),
8821 rc = VERR_VD_IMAGE_READ_ONLY);
8822
8823 rc2 = vdThreadFinishRead(pDisk);
8824 AssertRC(rc2);
8825 fLockRead = false;
8826
8827 rc2 = vdThreadStartWrite(pDisk);
8828 AssertRC(rc2);
8829 fLockWrite = true;
8830
8831 /*
8832 * Open all images in the chain in read write mode first to avoid running
8833 * into an error in the middle of the process.
8834 */
8835 PVDIMAGE pImage = pDisk->pBase;
8836
8837 while (pImage)
8838 {
8839 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8840 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
8841 {
8842 /*
8843 * Clear skip consistency checks because the image is made writable now and
8844 * skipping consistency checks is only possible for readonly images.
8845 */
8846 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
8847 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8848 if (RT_FAILURE(rc))
8849 break;
8850 }
8851 pImage = pImage->pNext;
8852 }
8853
8854 if (RT_SUCCESS(rc))
8855 {
8856 unsigned cImgCur = 0;
8857 unsigned uPercentStart = 0;
8858 unsigned uPercentSpan = 100 / pDisk->cImages - 1;
8859
8860 /* Allocate tmp buffer. */
8861 void *pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
8862 if (!pvBuf)
8863 {
8864 rc = VERR_NO_MEMORY;
8865 break;
8866 }
8867
8868 pImage = pDisk->pBase;
8869 pDisk->fLocked = true;
8870
8871 while ( pImage
8872 && RT_SUCCESS(rc))
8873 {
8874 /* Get size of image. */
8875 uint64_t cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
8876 uint64_t cbSizeFile = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
8877 uint64_t cbFileWritten = 0;
8878 uint64_t uOffset = 0;
8879 uint64_t cbRemaining = cbSize;
8880
8881 do
8882 {
8883 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8884 RTSGSEG SegmentBuf;
8885 RTSGBUF SgBuf;
8886 VDIOCTX IoCtx;
8887
8888 SegmentBuf.pvSeg = pvBuf;
8889 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8890 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8891 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8892 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8893
8894 rc = pImage->Backend->pfnRead(pImage->pBackendData, uOffset,
8895 cbThisRead, &IoCtx, &cbThisRead);
8896 if (rc != VERR_VD_BLOCK_FREE)
8897 {
8898 if (RT_FAILURE(rc))
8899 break;
8900
8901 /* Apply filter chains. */
8902 rc = vdFilterChainApplyRead(pDisk, uOffset, cbThisRead, &IoCtx);
8903 if (RT_FAILURE(rc))
8904 break;
8905
8906 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbThisRead, &IoCtx);
8907 if (RT_FAILURE(rc))
8908 break;
8909
8910 RTSgBufReset(&SgBuf);
8911 size_t cbThisWrite = 0;
8912 size_t cbPreRead = 0;
8913 size_t cbPostRead = 0;
8914 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset,
8915 cbThisRead, &IoCtx, &cbThisWrite,
8916 &cbPreRead, &cbPostRead, 0);
8917 if (RT_FAILURE(rc))
8918 break;
8919 Assert(cbThisWrite == cbThisRead);
8920 cbFileWritten += cbThisWrite;
8921 }
8922 else
8923 rc = VINF_SUCCESS;
8924
8925 uOffset += cbThisRead;
8926 cbRemaining -= cbThisRead;
8927
8928 if (pIfProgress && pIfProgress->pfnProgress)
8929 {
8930 rc2 = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8931 uPercentStart + cbFileWritten * uPercentSpan / cbSizeFile);
8932 AssertRC(rc2); /* Cancelling this operation without leaving an inconsistent state is not possible. */
8933 }
8934 } while (uOffset < cbSize);
8935
8936 pImage = pImage->pNext;
8937 cImgCur++;
8938 uPercentStart += uPercentSpan;
8939 }
8940
8941 pDisk->fLocked = false;
8942 if (pvBuf)
8943 RTMemTmpFree(pvBuf);
8944 }
8945
8946 /* Change images except last one back to readonly. */
8947 pImage = pDisk->pBase;
8948 while ( pImage != pDisk->pLast
8949 && pImage)
8950 {
8951 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8952 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8953 rc2 = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8954 if (RT_FAILURE(rc2))
8955 {
8956 if (RT_SUCCESS(rc))
8957 rc = rc2;
8958 break;
8959 }
8960 pImage = pImage->pNext;
8961 }
8962 } while (0);
8963
8964 if (RT_UNLIKELY(fLockWrite))
8965 {
8966 rc2 = vdThreadFinishWrite(pDisk);
8967 AssertRC(rc2);
8968 }
8969 else if (RT_UNLIKELY(fLockRead))
8970 {
8971 rc2 = vdThreadFinishRead(pDisk);
8972 AssertRC(rc2);
8973 }
8974
8975 if ( RT_SUCCESS(rc)
8976 && pIfProgress
8977 && pIfProgress->pfnProgress)
8978 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8979
8980 LogFlowFunc(("returns %Rrc\n", rc));
8981 return rc;
8982}
8983
8984/**
8985 * Closes the last opened image file in HDD container.
8986 * If previous image file was opened in read-only mode (the normal case) and
8987 * the last opened image is in read-write mode then the previous image will be
8988 * reopened in read/write mode.
8989 *
8990 * @returns VBox status code.
8991 * @returns VERR_VD_NOT_OPENED if no image is opened in HDD container.
8992 * @param pDisk Pointer to HDD container.
8993 * @param fDelete If true, delete the image from the host disk.
8994 */
VBOXDDU_DECL(int) VDClose(PVBOXHDD pDisk, bool fDelete)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Not worth splitting this up into a read lock phase and write
         * lock phase, as closing an image is a relatively fast operation
         * dominated by the part which needs the write lock. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = pDisk->pLast;
        if (!pImage)
        {
            rc = VERR_VD_NOT_OPENED;
            break;
        }

        /* Destroy the current discard state first which might still have pending blocks. */
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;

        /* Remember the open flags before the image is freed below; they decide
         * whether the new last image must be made writable. */
        unsigned uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
        /* Remove image from list of opened images. */
        vdRemoveImageFromList(pDisk, pImage);
        /* Close (and optionally delete) image. */
        rc = pImage->Backend->pfnClose(pImage->pBackendData, fDelete);
        /* Free remaining resources related to the image. */
        RTStrFree(pImage->pszFilename);
        RTMemFree(pImage);

        /* The container may now be empty; nothing left to re-cache then. */
        pImage = pDisk->pLast;
        if (!pImage)
            break;

        /* If disk was previously in read/write mode, make sure it will stay
         * like this (if possible) after closing this image. Set the open flags
         * accordingly. */
        if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
            uOpenFlags &= ~ VD_OPEN_FLAGS_READONLY;
            /* NOTE(review): this overwrites any failure status returned by
             * pfnClose above — confirm that masking is acceptable. */
            rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
        }

        /* Cache disk information. */
        pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);

        /* Cache PCHS geometry. */
        rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                  &pDisk->PCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            /* Zeroed geometry means "not set"; see VDGetPCHSGeometry. */
            pDisk->PCHSGeometry.cCylinders = 0;
            pDisk->PCHSGeometry.cHeads = 0;
            pDisk->PCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the PCHS geometry is properly clipped. */
            pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
            pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
            pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
        }

        /* Cache LCHS geometry. */
        rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                  &pDisk->LCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            /* Zeroed geometry means "not set". */
            pDisk->LCHSGeometry.cCylinders = 0;
            pDisk->LCHSGeometry.cHeads = 0;
            pDisk->LCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the LCHS geometry is properly clipped. */
            pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
            pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
        }
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9096
9097/**
9098 * Closes the currently opened cache image file in HDD container.
9099 *
9100 * @return VBox status code.
9101 * @return VERR_VD_NOT_OPENED if no cache is opened in HDD container.
9102 * @param pDisk Pointer to HDD container.
 * @param   pDisk           Pointer to HDD container.
9104 */
9105VBOXDDU_DECL(int) VDCacheClose(PVBOXHDD pDisk, bool fDelete)
9106{
9107 int rc = VINF_SUCCESS;
9108 int rc2;
9109 bool fLockWrite = false;
9110 PVDCACHE pCache = NULL;
9111
9112 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9113
9114 do
9115 {
9116 /* sanity check */
9117 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9118 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9119
9120 rc2 = vdThreadStartWrite(pDisk);
9121 AssertRC(rc2);
9122 fLockWrite = true;
9123
9124 AssertPtrBreakStmt(pDisk->pCache, rc = VERR_VD_CACHE_NOT_FOUND);
9125
9126 pCache = pDisk->pCache;
9127 pDisk->pCache = NULL;
9128
9129 pCache->Backend->pfnClose(pCache->pBackendData, fDelete);
9130 if (pCache->pszFilename)
9131 RTStrFree(pCache->pszFilename);
9132 RTMemFree(pCache);
9133 } while (0);
9134
9135 if (RT_LIKELY(fLockWrite))
9136 {
9137 rc2 = vdThreadFinishWrite(pDisk);
9138 AssertRC(rc2);
9139 }
9140
9141 LogFlowFunc(("returns %Rrc\n", rc));
9142 return rc;
9143}
9144
/**
 * Removes the last added filter from the write and/or read filter chain of
 * the HDD container, as selected by the flags, and drops its reference.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_NOT_OPENED if a selected filter chain is empty.
 * @param   pDisk   Pointer to HDD container.
 * @param   fFlags  Combination of VD_FILTER_FLAGS_* values selecting the
 *                  chain(s) to remove from.
 */
VBOXDDU_DECL(int) VDFilterRemove(PVBOXHDD pDisk, uint32_t fFlags)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;
    PVDFILTER pFilter = NULL;

    LogFlowFunc(("pDisk=%#p\n", pDisk));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
                           ("Invalid flags set (fFlags=%#x)\n", fFlags),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        if (fFlags & VD_FILTER_FLAGS_WRITE)
        {
            /* NOTE(review): if the write chain is empty this breaks out and a
             * simultaneously requested read-chain removal below is skipped —
             * confirm this is the intended semantics. */
            AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainWrite), rc = VERR_VD_NOT_OPENED);
            pFilter = RTListGetLast(&pDisk->ListFilterChainWrite, VDFILTER, ListNodeChainWrite);
            AssertPtr(pFilter);
            RTListNodeRemove(&pFilter->ListNodeChainWrite);
            vdFilterRelease(pFilter);
        }

        if (fFlags & VD_FILTER_FLAGS_READ)
        {
            AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainRead), rc = VERR_VD_NOT_OPENED);
            pFilter = RTListGetLast(&pDisk->ListFilterChainRead, VDFILTER, ListNodeChainRead);
            AssertPtr(pFilter);
            RTListNodeRemove(&pFilter->ListNodeChainRead);
            vdFilterRelease(pFilter);
        }
    } while (0);

    if (RT_LIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9196
9197/**
9198 * Closes all opened image files in HDD container.
9199 *
9200 * @returns VBox status code.
9201 * @param pDisk Pointer to HDD container.
9202 */
9203VBOXDDU_DECL(int) VDCloseAll(PVBOXHDD pDisk)
9204{
9205 int rc = VINF_SUCCESS;
9206 int rc2;
9207 bool fLockWrite = false;
9208
9209 LogFlowFunc(("pDisk=%#p\n", pDisk));
9210 do
9211 {
9212 /* sanity check */
9213 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9214 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9215
9216 /* Lock the entire operation. */
9217 rc2 = vdThreadStartWrite(pDisk);
9218 AssertRC(rc2);
9219 fLockWrite = true;
9220
9221 PVDCACHE pCache = pDisk->pCache;
9222 if (pCache)
9223 {
9224 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
9225 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9226 rc = rc2;
9227
9228 if (pCache->pszFilename)
9229 RTStrFree(pCache->pszFilename);
9230 RTMemFree(pCache);
9231 }
9232
9233 PVDIMAGE pImage = pDisk->pLast;
9234 while (VALID_PTR(pImage))
9235 {
9236 PVDIMAGE pPrev = pImage->pPrev;
9237 /* Remove image from list of opened images. */
9238 vdRemoveImageFromList(pDisk, pImage);
9239 /* Close image. */
9240 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
9241 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9242 rc = rc2;
9243 /* Free remaining resources related to the image. */
9244 RTStrFree(pImage->pszFilename);
9245 RTMemFree(pImage);
9246 pImage = pPrev;
9247 }
9248 Assert(!VALID_PTR(pDisk->pLast));
9249 } while (0);
9250
9251 if (RT_UNLIKELY(fLockWrite))
9252 {
9253 rc2 = vdThreadFinishWrite(pDisk);
9254 AssertRC(rc2);
9255 }
9256
9257 LogFlowFunc(("returns %Rrc\n", rc));
9258 return rc;
9259}
9260
9261/**
9262 * Removes all filters of the given HDD container.
9263 *
9264 * @return VBox status code.
9265 * @param pDisk Pointer to HDD container.
9266 */
9267VBOXDDU_DECL(int) VDFilterRemoveAll(PVBOXHDD pDisk)
9268{
9269 int rc = VINF_SUCCESS;
9270 int rc2;
9271 bool fLockWrite = false;
9272
9273 LogFlowFunc(("pDisk=%#p\n", pDisk));
9274 do
9275 {
9276 /* sanity check */
9277 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9278 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9279
9280 /* Lock the entire operation. */
9281 rc2 = vdThreadStartWrite(pDisk);
9282 AssertRC(rc2);
9283 fLockWrite = true;
9284
9285 PVDFILTER pFilter, pFilterNext;
9286 RTListForEachSafe(&pDisk->ListFilterChainWrite, pFilter, pFilterNext, VDFILTER, ListNodeChainWrite)
9287 {
9288 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9289 vdFilterRelease(pFilter);
9290 }
9291
9292 RTListForEachSafe(&pDisk->ListFilterChainRead, pFilter, pFilterNext, VDFILTER, ListNodeChainRead)
9293 {
9294 RTListNodeRemove(&pFilter->ListNodeChainRead);
9295 vdFilterRelease(pFilter);
9296 }
9297 Assert(RTListIsEmpty(&pDisk->ListFilterChainRead));
9298 Assert(RTListIsEmpty(&pDisk->ListFilterChainWrite));
9299 } while (0);
9300
9301 if (RT_UNLIKELY(fLockWrite))
9302 {
9303 rc2 = vdThreadFinishWrite(pDisk);
9304 AssertRC(rc2);
9305 }
9306
9307 LogFlowFunc(("returns %Rrc\n", rc));
9308 return rc;
9309}
9310
9311/**
9312 * Read data from virtual HDD.
9313 *
9314 * @returns VBox status code.
9315 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9316 * @param pDisk Pointer to HDD container.
9317 * @param uOffset Offset of first reading byte from start of disk.
9318 * @param pvBuf Pointer to buffer for reading data.
9319 * @param cbRead Number of bytes to read.
9320 */
VBOXDDU_DECL(int) VDRead(PVBOXHDD pDisk, uint64_t uOffset, void *pvBuf,
                         size_t cbRead)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockRead = false;

    LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbRead=%zu\n",
                 pDisk, uOffset, pvBuf, cbRead));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pvBuf),
                           ("pvBuf=%#p\n", pvBuf),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbRead,
                           ("cbRead=%zu\n", cbRead),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;

        /* Reads are issued against the topmost (last) image of the chain. */
        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        if (uOffset + cbRead > pDisk->cbSize)
        {
            /* Floppy images might be smaller than the standard expected by
               the floppy controller code. So, we won't fail here. */
            AssertMsgBreakStmt(pDisk->enmType == VDTYPE_FLOPPY,
                               ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
                                uOffset, cbRead, pDisk->cbSize),
                               rc = VERR_EOF);
            memset(pvBuf, 0xf6, cbRead); /* f6h = format.com filler byte */
            /* Entirely beyond the end: the filler bytes are the whole result. */
            if (uOffset >= pDisk->cbSize)
                break;
            /* Partially beyond the end: clip and read only the valid prefix;
             * the tail of the buffer keeps the filler bytes set above. */
            cbRead = pDisk->cbSize - uOffset;
        }

        rc = vdReadHelper(pDisk, pImage, uOffset, pvBuf, cbRead,
                          true /* fUpdateCache */);
    } while (0);

    if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9378
9379/**
9380 * Write data to virtual HDD.
9381 *
9382 * @returns VBox status code.
9383 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9384 * @param pDisk Pointer to HDD container.
9385 * @param uOffset Offset of the first byte being
9386 * written from start of disk.
9387 * @param pvBuf Pointer to buffer for writing data.
9388 * @param cbWrite Number of bytes to write.
9389 */
VBOXDDU_DECL(int) VDWrite(PVBOXHDD pDisk, uint64_t uOffset, const void *pvBuf,
                          size_t cbWrite)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbWrite=%zu\n",
                 pDisk, uOffset, pvBuf, cbWrite));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pvBuf),
                           ("pvBuf=%#p\n", pvBuf),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbWrite,
                           ("cbWrite=%zu\n", cbWrite),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Unlike VDRead there is no clipping here: writes past the end of the
         * disk are rejected outright. */
        AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
                           ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
                            uOffset, cbWrite, pDisk->cbSize),
                           rc = VERR_INVALID_PARAMETER);

        /* Writes are issued against the topmost (last) image of the chain. */
        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        /* Mark the disk as modified before touching any data. */
        vdSetModifiedFlag(pDisk);
        rc = vdWriteHelper(pDisk, pImage, uOffset, pvBuf, cbWrite,
                           VDIOCTX_FLAGS_READ_UPDATE_CACHE);
        if (RT_FAILURE(rc))
            break;

        /* If there is a merge (in the direction towards a parent) running
         * concurrently then we have to also "relay" the write to this parent,
         * as the merge position might be already past the position where
         * this write is going. The "context" of the write can come from the
         * natural chain, since merging either already did or will take care
         * of the "other" content which is might be needed to fill the block
         * to a full allocation size. The cache doesn't need to be touched
         * as this write is covered by the previous one. */
        if (RT_UNLIKELY(pDisk->pImageRelay))
            rc = vdWriteHelper(pDisk, pDisk->pImageRelay, uOffset,
                               pvBuf, cbWrite, VDIOCTX_FLAGS_DEFAULT);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9453
9454/**
9455 * Make sure the on disk representation of a virtual HDD is up to date.
9456 *
9457 * @returns VBox status code.
9458 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9459 * @param pDisk Pointer to HDD container.
9460 */
VBOXDDU_DECL(int) VDFlush(PVBOXHDD pDisk)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p\n", pDisk));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        /* The flush is routed through the asynchronous I/O context machinery
         * but executed synchronously: the completion callback set below
         * presumably signals hEventComplete when the context finishes, which
         * vdIoCtxProcessSync waits on. */
        VDIOCTX IoCtx;
        RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;

        rc = RTSemEventCreate(&hEventComplete);
        if (RT_FAILURE(rc))
            break;

        /* VDIOCTX_FLAGS_DONT_FREE: IoCtx lives on this stack frame, so the
         * completion path must not attempt to free it. */
        vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, pImage, NULL,
                    NULL, vdFlushHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);

        IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
        IoCtx.Type.Root.pvUser1 = pDisk;
        IoCtx.Type.Root.pvUser2 = hEventComplete;
        rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);

        RTSemEventDestroy(hEventComplete);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9508
9509/**
9510 * Get number of opened images in HDD container.
9511 *
9512 * @returns Number of opened images for HDD container. 0 if no images have been opened.
9513 * @param pDisk Pointer to HDD container.
9514 */
9515VBOXDDU_DECL(unsigned) VDGetCount(PVBOXHDD pDisk)
9516{
9517 unsigned cImages;
9518 int rc2;
9519 bool fLockRead = false;
9520
9521 LogFlowFunc(("pDisk=%#p\n", pDisk));
9522 do
9523 {
9524 /* sanity check */
9525 AssertPtrBreakStmt(pDisk, cImages = 0);
9526 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9527
9528 rc2 = vdThreadStartRead(pDisk);
9529 AssertRC(rc2);
9530 fLockRead = true;
9531
9532 cImages = pDisk->cImages;
9533 } while (0);
9534
9535 if (RT_UNLIKELY(fLockRead))
9536 {
9537 rc2 = vdThreadFinishRead(pDisk);
9538 AssertRC(rc2);
9539 }
9540
9541 LogFlowFunc(("returns %u\n", cImages));
9542 return cImages;
9543}
9544
9545/**
9546 * Get read/write mode of HDD container.
9547 *
9548 * @returns Virtual disk ReadOnly status.
9549 * @returns true if no image is opened in HDD container.
9550 * @param pDisk Pointer to HDD container.
9551 */
9552VBOXDDU_DECL(bool) VDIsReadOnly(PVBOXHDD pDisk)
9553{
9554 bool fReadOnly;
9555 int rc2;
9556 bool fLockRead = false;
9557
9558 LogFlowFunc(("pDisk=%#p\n", pDisk));
9559 do
9560 {
9561 /* sanity check */
9562 AssertPtrBreakStmt(pDisk, fReadOnly = false);
9563 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9564
9565 rc2 = vdThreadStartRead(pDisk);
9566 AssertRC(rc2);
9567 fLockRead = true;
9568
9569 PVDIMAGE pImage = pDisk->pLast;
9570 AssertPtrBreakStmt(pImage, fReadOnly = true);
9571
9572 unsigned uOpenFlags;
9573 uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
9574 fReadOnly = !!(uOpenFlags & VD_OPEN_FLAGS_READONLY);
9575 } while (0);
9576
9577 if (RT_UNLIKELY(fLockRead))
9578 {
9579 rc2 = vdThreadFinishRead(pDisk);
9580 AssertRC(rc2);
9581 }
9582
9583 LogFlowFunc(("returns %d\n", fReadOnly));
9584 return fReadOnly;
9585}
9586
9587/**
9588 * Get sector size of an image in HDD container.
9589 *
9590 * @return Virtual disk sector size in bytes.
9591 * @return 0 if image with specified number was not opened.
9592 * @param pDisk Pointer to HDD container.
9593 * @param nImage Image number, counts from 0. 0 is always base image of container.
9594 */
9595VBOXDDU_DECL(uint32_t) VDGetSectorSize(PVBOXHDD pDisk, unsigned nImage)
9596{
9597 uint64_t cbSector;
9598 int rc2;
9599 bool fLockRead = false;
9600
9601 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9602 do
9603 {
9604 /* sanity check */
9605 AssertPtrBreakStmt(pDisk, cbSector = 0);
9606 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9607
9608 rc2 = vdThreadStartRead(pDisk);
9609 AssertRC(rc2);
9610 fLockRead = true;
9611
9612 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9613 AssertPtrBreakStmt(pImage, cbSector = 0);
9614 cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
9615 } while (0);
9616
9617 if (RT_UNLIKELY(fLockRead))
9618 {
9619 rc2 = vdThreadFinishRead(pDisk);
9620 AssertRC(rc2);
9621 }
9622
9623 LogFlowFunc(("returns %u\n", cbSector));
9624 return cbSector;
9625}
9626
9627/**
9628 * Get total capacity of an image in HDD container.
9629 *
9630 * @returns Virtual disk size in bytes.
 * @returns 0 if image with specified number was not opened.
9632 * @param pDisk Pointer to HDD container.
9633 * @param nImage Image number, counts from 0. 0 is always base image of container.
9634 */
9635VBOXDDU_DECL(uint64_t) VDGetSize(PVBOXHDD pDisk, unsigned nImage)
9636{
9637 uint64_t cbSize;
9638 int rc2;
9639 bool fLockRead = false;
9640
9641 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9642 do
9643 {
9644 /* sanity check */
9645 AssertPtrBreakStmt(pDisk, cbSize = 0);
9646 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9647
9648 rc2 = vdThreadStartRead(pDisk);
9649 AssertRC(rc2);
9650 fLockRead = true;
9651
9652 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9653 AssertPtrBreakStmt(pImage, cbSize = 0);
9654 cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9655 } while (0);
9656
9657 if (RT_UNLIKELY(fLockRead))
9658 {
9659 rc2 = vdThreadFinishRead(pDisk);
9660 AssertRC(rc2);
9661 }
9662
9663 LogFlowFunc(("returns %llu\n", cbSize));
9664 return cbSize;
9665}
9666
9667/**
9668 * Get total file size of an image in HDD container.
9669 *
9670 * @returns Virtual disk size in bytes.
 * @returns 0 if image with specified number was not opened.
9672 * @param pDisk Pointer to HDD container.
9673 * @param nImage Image number, counts from 0. 0 is always base image of container.
9674 */
9675VBOXDDU_DECL(uint64_t) VDGetFileSize(PVBOXHDD pDisk, unsigned nImage)
9676{
9677 uint64_t cbSize;
9678 int rc2;
9679 bool fLockRead = false;
9680
9681 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9682 do
9683 {
9684 /* sanity check */
9685 AssertPtrBreakStmt(pDisk, cbSize = 0);
9686 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9687
9688 rc2 = vdThreadStartRead(pDisk);
9689 AssertRC(rc2);
9690 fLockRead = true;
9691
9692 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9693 AssertPtrBreakStmt(pImage, cbSize = 0);
9694 cbSize = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
9695 } while (0);
9696
9697 if (RT_UNLIKELY(fLockRead))
9698 {
9699 rc2 = vdThreadFinishRead(pDisk);
9700 AssertRC(rc2);
9701 }
9702
9703 LogFlowFunc(("returns %llu\n", cbSize));
9704 return cbSize;
9705}
9706
9707/**
9708 * Get virtual disk PCHS geometry stored in HDD container.
9709 *
9710 * @returns VBox status code.
9711 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9712 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9713 * @param pDisk Pointer to HDD container.
9714 * @param nImage Image number, counts from 0. 0 is always base image of container.
9715 * @param pPCHSGeometry Where to store PCHS geometry. Not NULL.
9716 */
9717VBOXDDU_DECL(int) VDGetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9718 PVDGEOMETRY pPCHSGeometry)
9719{
9720 int rc = VINF_SUCCESS;
9721 int rc2;
9722 bool fLockRead = false;
9723
9724 LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p\n",
9725 pDisk, nImage, pPCHSGeometry));
9726 do
9727 {
9728 /* sanity check */
9729 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9730 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9731
9732 /* Check arguments. */
9733 AssertMsgBreakStmt(VALID_PTR(pPCHSGeometry),
9734 ("pPCHSGeometry=%#p\n", pPCHSGeometry),
9735 rc = VERR_INVALID_PARAMETER);
9736
9737 rc2 = vdThreadStartRead(pDisk);
9738 AssertRC(rc2);
9739 fLockRead = true;
9740
9741 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9742 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9743
9744 if (pImage == pDisk->pLast)
9745 {
9746 /* Use cached information if possible. */
9747 if (pDisk->PCHSGeometry.cCylinders != 0)
9748 *pPCHSGeometry = pDisk->PCHSGeometry;
9749 else
9750 rc = VERR_VD_GEOMETRY_NOT_SET;
9751 }
9752 else
9753 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
9754 pPCHSGeometry);
9755 } while (0);
9756
9757 if (RT_UNLIKELY(fLockRead))
9758 {
9759 rc2 = vdThreadFinishRead(pDisk);
9760 AssertRC(rc2);
9761 }
9762
9763 LogFlowFunc(("%Rrc (PCHS=%u/%u/%u)\n", rc,
9764 pDisk->PCHSGeometry.cCylinders, pDisk->PCHSGeometry.cHeads,
9765 pDisk->PCHSGeometry.cSectors));
9766 return rc;
9767}
9768
9769/**
9770 * Store virtual disk PCHS geometry in HDD container.
9771 *
9772 * Note that in case of unrecoverable error all images in HDD container will be closed.
9773 *
9774 * @returns VBox status code.
9775 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9776 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9777 * @param pDisk Pointer to HDD container.
9778 * @param nImage Image number, counts from 0. 0 is always base image of container.
9779 * @param pPCHSGeometry Where to load PCHS geometry from. Not NULL.
9780 */
VBOXDDU_DECL(int) VDSetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
                                    PCVDGEOMETRY pPCHSGeometry)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
                 pDisk, nImage, pPCHSGeometry, pPCHSGeometry->cCylinders,
                 pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(   VALID_PTR(pPCHSGeometry)
                           && pPCHSGeometry->cHeads <= 16
                           && pPCHSGeometry->cSectors <= 63,
                           ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
                            pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                            pPCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        if (pImage == pDisk->pLast)
        {
            /* For the last image the geometry is compared against (and the
             * result cached in) the per-disk copy. */
            if (   pPCHSGeometry->cCylinders != pDisk->PCHSGeometry.cCylinders
                || pPCHSGeometry->cHeads != pDisk->PCHSGeometry.cHeads
                || pPCHSGeometry->cSectors != pDisk->PCHSGeometry.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
                                                         pPCHSGeometry);

                /* Cache new geometry values in any case. */
                rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                          &pDisk->PCHSGeometry);
                if (RT_FAILURE(rc2))
                {
                    /* Zeroed geometry means "not set"; see VDGetPCHSGeometry. */
                    pDisk->PCHSGeometry.cCylinders = 0;
                    pDisk->PCHSGeometry.cHeads = 0;
                    pDisk->PCHSGeometry.cSectors = 0;
                }
                else
                {
                    /* Make sure the CHS geometry is properly clipped. */
                    /* NOTE(review): clips heads to 255 although the argument
                     * check above requires <= 16 and VDClose clips the cached
                     * PCHS heads to 16 — confirm which limit is intended. */
                    pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 255);
                    pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
                }
            }
        }
        else
        {
            /* For other images, compare against the geometry reported by the
             * backend itself; there is no cached copy for them. */
            VDGEOMETRY PCHS;
            rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                     &PCHS);
            if (   RT_FAILURE(rc)
                || pPCHSGeometry->cCylinders != PCHS.cCylinders
                || pPCHSGeometry->cHeads != PCHS.cHeads
                || pPCHSGeometry->cSectors != PCHS.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
                                                         pPCHSGeometry);
            }
        }
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9874
9875/**
9876 * Get virtual disk LCHS geometry stored in HDD container.
9877 *
9878 * @returns VBox status code.
9879 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9880 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9881 * @param pDisk Pointer to HDD container.
9882 * @param nImage Image number, counts from 0. 0 is always base image of container.
9883 * @param pLCHSGeometry Where to store LCHS geometry. Not NULL.
9884 */
9885VBOXDDU_DECL(int) VDGetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9886 PVDGEOMETRY pLCHSGeometry)
9887{
9888 int rc = VINF_SUCCESS;
9889 int rc2;
9890 bool fLockRead = false;
9891
9892 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p\n",
9893 pDisk, nImage, pLCHSGeometry));
9894 do
9895 {
9896 /* sanity check */
9897 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9898 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9899
9900 /* Check arguments. */
9901 AssertMsgBreakStmt(VALID_PTR(pLCHSGeometry),
9902 ("pLCHSGeometry=%#p\n", pLCHSGeometry),
9903 rc = VERR_INVALID_PARAMETER);
9904
9905 rc2 = vdThreadStartRead(pDisk);
9906 AssertRC(rc2);
9907 fLockRead = true;
9908
9909 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9910 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9911
9912 if (pImage == pDisk->pLast)
9913 {
9914 /* Use cached information if possible. */
9915 if (pDisk->LCHSGeometry.cCylinders != 0)
9916 *pLCHSGeometry = pDisk->LCHSGeometry;
9917 else
9918 rc = VERR_VD_GEOMETRY_NOT_SET;
9919 }
9920 else
9921 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9922 pLCHSGeometry);
9923 } while (0);
9924
9925 if (RT_UNLIKELY(fLockRead))
9926 {
9927 rc2 = vdThreadFinishRead(pDisk);
9928 AssertRC(rc2);
9929 }
9930
9931 LogFlowFunc((": %Rrc (LCHS=%u/%u/%u)\n", rc,
9932 pDisk->LCHSGeometry.cCylinders, pDisk->LCHSGeometry.cHeads,
9933 pDisk->LCHSGeometry.cSectors));
9934 return rc;
9935}
9936
9937/**
9938 * Store virtual disk LCHS geometry in HDD container.
9939 *
9940 * Note that in case of unrecoverable error all images in HDD container will be closed.
9941 *
9942 * @returns VBox status code.
9943 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9944 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9945 * @param pDisk Pointer to HDD container.
9946 * @param nImage Image number, counts from 0. 0 is always base image of container.
9947 * @param pLCHSGeometry Where to load LCHS geometry from. Not NULL.
9948 */
VBOXDDU_DECL(int) VDSetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
                                    PCVDGEOMETRY pLCHSGeometry)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false; /* Set once the disk write lock is held, so the epilogue knows to release it. */

    LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
                 pDisk, nImage, pLCHSGeometry, pLCHSGeometry->cCylinders,
                 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. Logical CHS geometry allows up to 255 heads
         * (unlike the 16 head limit for physical geometry). */
        AssertMsgBreakStmt(   VALID_PTR(pLCHSGeometry)
                           && pLCHSGeometry->cHeads <= 255
                           && pLCHSGeometry->cSectors <= 63,
                           ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
                            pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
                            pLCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        /* For the topmost image the geometry cached in the disk container is
         * authoritative; for any other image the backend is queried directly. */
        if (pImage == pDisk->pLast)
        {
            if (   pLCHSGeometry->cCylinders != pDisk->LCHSGeometry.cCylinders
                || pLCHSGeometry->cHeads != pDisk->LCHSGeometry.cHeads
                || pLCHSGeometry->cSectors != pDisk->LCHSGeometry.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
                                                         pLCHSGeometry);

                /* Cache new geometry values in any case. */
                rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                          &pDisk->LCHSGeometry);
                if (RT_FAILURE(rc2))
                {
                    /* Backend refused to report a geometry: invalidate the
                     * cache (cCylinders == 0 means "not set"). */
                    pDisk->LCHSGeometry.cCylinders = 0;
                    pDisk->LCHSGeometry.cHeads = 0;
                    pDisk->LCHSGeometry.cSectors = 0;
                }
                else
                {
                    /* Make sure the CHS geometry is properly clipped. */
                    pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
                    pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
                }
            }
        }
        else
        {
            /* Not the topmost image: compare against the backend's current
             * geometry instead of the cached one (the cache only tracks pLast). */
            VDGEOMETRY LCHS;
            rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                     &LCHS);
            if (   RT_FAILURE(rc)
                || pLCHSGeometry->cCylinders != LCHS.cCylinders
                || pLCHSGeometry->cHeads != LCHS.cHeads
                || pLCHSGeometry->cSectors != LCHS.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
                                                         pLCHSGeometry);
            }
        }
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
10042
10043/**
10044 * Get version of image in HDD container.
10045 *
10046 * @returns VBox status code.
10047 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10048 * @param pDisk Pointer to HDD container.
10049 * @param nImage Image number, counts from 0. 0 is always base image of container.
10050 * @param puVersion Where to store the image version.
10051 */
10052VBOXDDU_DECL(int) VDGetVersion(PVBOXHDD pDisk, unsigned nImage,
10053 unsigned *puVersion)
10054{
10055 int rc = VINF_SUCCESS;
10056 int rc2;
10057 bool fLockRead = false;
10058
10059 LogFlowFunc(("pDisk=%#p nImage=%u puVersion=%#p\n",
10060 pDisk, nImage, puVersion));
10061 do
10062 {
10063 /* sanity check */
10064 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10065 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10066
10067 /* Check arguments. */
10068 AssertMsgBreakStmt(VALID_PTR(puVersion),
10069 ("puVersion=%#p\n", puVersion),
10070 rc = VERR_INVALID_PARAMETER);
10071
10072 rc2 = vdThreadStartRead(pDisk);
10073 AssertRC(rc2);
10074 fLockRead = true;
10075
10076 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10077 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10078
10079 *puVersion = pImage->Backend->pfnGetVersion(pImage->pBackendData);
10080 } while (0);
10081
10082 if (RT_UNLIKELY(fLockRead))
10083 {
10084 rc2 = vdThreadFinishRead(pDisk);
10085 AssertRC(rc2);
10086 }
10087
10088 LogFlowFunc(("returns %Rrc uVersion=%#x\n", rc, *puVersion));
10089 return rc;
10090}
10091
10092/**
10093 * List the capabilities of image backend in HDD container.
10094 *
10095 * @returns VBox status code.
10096 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10097 * @param pDisk Pointer to the HDD container.
10098 * @param nImage Image number, counts from 0. 0 is always base image of container.
10099 * @param pBackendInfo Where to store the backend information.
10100 */
10101VBOXDDU_DECL(int) VDBackendInfoSingle(PVBOXHDD pDisk, unsigned nImage,
10102 PVDBACKENDINFO pBackendInfo)
10103{
10104 int rc = VINF_SUCCESS;
10105 int rc2;
10106 bool fLockRead = false;
10107
10108 LogFlowFunc(("pDisk=%#p nImage=%u pBackendInfo=%#p\n",
10109 pDisk, nImage, pBackendInfo));
10110 do
10111 {
10112 /* sanity check */
10113 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10114 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10115
10116 /* Check arguments. */
10117 AssertMsgBreakStmt(VALID_PTR(pBackendInfo),
10118 ("pBackendInfo=%#p\n", pBackendInfo),
10119 rc = VERR_INVALID_PARAMETER);
10120
10121 rc2 = vdThreadStartRead(pDisk);
10122 AssertRC(rc2);
10123 fLockRead = true;
10124
10125 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10126 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10127
10128 pBackendInfo->pszBackend = pImage->Backend->pszBackendName;
10129 pBackendInfo->uBackendCaps = pImage->Backend->uBackendCaps;
10130 pBackendInfo->paFileExtensions = pImage->Backend->paFileExtensions;
10131 pBackendInfo->paConfigInfo = pImage->Backend->paConfigInfo;
10132 } while (0);
10133
10134 if (RT_UNLIKELY(fLockRead))
10135 {
10136 rc2 = vdThreadFinishRead(pDisk);
10137 AssertRC(rc2);
10138 }
10139
10140 LogFlowFunc(("returns %Rrc\n", rc));
10141 return rc;
10142}
10143
10144/**
10145 * Get flags of image in HDD container.
10146 *
10147 * @returns VBox status code.
10148 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10149 * @param pDisk Pointer to HDD container.
10150 * @param nImage Image number, counts from 0. 0 is always base image of container.
10151 * @param puImageFlags Where to store the image flags.
10152 */
10153VBOXDDU_DECL(int) VDGetImageFlags(PVBOXHDD pDisk, unsigned nImage,
10154 unsigned *puImageFlags)
10155{
10156 int rc = VINF_SUCCESS;
10157 int rc2;
10158 bool fLockRead = false;
10159
10160 LogFlowFunc(("pDisk=%#p nImage=%u puImageFlags=%#p\n",
10161 pDisk, nImage, puImageFlags));
10162 do
10163 {
10164 /* sanity check */
10165 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10166 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10167
10168 /* Check arguments. */
10169 AssertMsgBreakStmt(VALID_PTR(puImageFlags),
10170 ("puImageFlags=%#p\n", puImageFlags),
10171 rc = VERR_INVALID_PARAMETER);
10172
10173 rc2 = vdThreadStartRead(pDisk);
10174 AssertRC(rc2);
10175 fLockRead = true;
10176
10177 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10178 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10179
10180 *puImageFlags = pImage->uImageFlags;
10181 } while (0);
10182
10183 if (RT_UNLIKELY(fLockRead))
10184 {
10185 rc2 = vdThreadFinishRead(pDisk);
10186 AssertRC(rc2);
10187 }
10188
10189 LogFlowFunc(("returns %Rrc uImageFlags=%#x\n", rc, *puImageFlags));
10190 return rc;
10191}
10192
10193/**
10194 * Get open flags of image in HDD container.
10195 *
10196 * @returns VBox status code.
10197 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10198 * @param pDisk Pointer to HDD container.
10199 * @param nImage Image number, counts from 0. 0 is always base image of container.
10200 * @param puOpenFlags Where to store the image open flags.
10201 */
10202VBOXDDU_DECL(int) VDGetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10203 unsigned *puOpenFlags)
10204{
10205 int rc = VINF_SUCCESS;
10206 int rc2;
10207 bool fLockRead = false;
10208
10209 LogFlowFunc(("pDisk=%#p nImage=%u puOpenFlags=%#p\n",
10210 pDisk, nImage, puOpenFlags));
10211 do
10212 {
10213 /* sanity check */
10214 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10215 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10216
10217 /* Check arguments. */
10218 AssertMsgBreakStmt(VALID_PTR(puOpenFlags),
10219 ("puOpenFlags=%#p\n", puOpenFlags),
10220 rc = VERR_INVALID_PARAMETER);
10221
10222 rc2 = vdThreadStartRead(pDisk);
10223 AssertRC(rc2);
10224 fLockRead = true;
10225
10226 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10227 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10228
10229 *puOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
10230 } while (0);
10231
10232 if (RT_UNLIKELY(fLockRead))
10233 {
10234 rc2 = vdThreadFinishRead(pDisk);
10235 AssertRC(rc2);
10236 }
10237
10238 LogFlowFunc(("returns %Rrc uOpenFlags=%#x\n", rc, *puOpenFlags));
10239 return rc;
10240}
10241
10242/**
10243 * Set open flags of image in HDD container.
10244 * This operation may cause file locking changes and/or files being reopened.
10245 * Note that in case of unrecoverable error all images in HDD container will be closed.
10246 *
10247 * @returns VBox status code.
10248 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10249 * @param pDisk Pointer to HDD container.
10250 * @param nImage Image number, counts from 0. 0 is always base image of container.
10251 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
10252 */
VBOXDDU_DECL(int) VDSetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
                                 unsigned uOpenFlags)
{
    int rc;
    int rc2;
    bool fLockWrite = false; /* Set once the disk write lock is held. */

    LogFlowFunc(("pDisk=%#p uOpenFlags=%#u\n", pDisk, uOpenFlags));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments: no bits outside the defined open-flags mask allowed. */
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Destroy any discard state because the image might be changed to readonly mode. */
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        /* Flags handled purely by the VD layer (HONOR_SAME, IGNORE_FLUSH,
         * INFORM_ABOUT_ZERO_BLOCKS) are stripped before reaching the backend. */
        rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData,
                                              uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS));
        if (RT_SUCCESS(rc))
            /* NOTE(review): only the VD-layer flags (plus DISCARD) are cached
             * here; other bits (e.g. read-only) appear to live in the backend
             * and are retrieved via pfnGetOpenFlags - confirm before changing. */
            pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
10299
10300/**
10301 * Get base filename of image in HDD container. Some image formats use
10302 * other filenames as well, so don't use this for anything but informational
10303 * purposes.
10304 *
10305 * @returns VBox status code.
10306 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10307 * @returns VERR_BUFFER_OVERFLOW if pszFilename buffer too small to hold filename.
10308 * @param pDisk Pointer to HDD container.
10309 * @param nImage Image number, counts from 0. 0 is always base image of container.
10310 * @param pszFilename Where to store the image file name.
10311 * @param cbFilename Size of buffer pszFilename points to.
10312 */
10313VBOXDDU_DECL(int) VDGetFilename(PVBOXHDD pDisk, unsigned nImage,
10314 char *pszFilename, unsigned cbFilename)
10315{
10316 int rc;
10317 int rc2;
10318 bool fLockRead = false;
10319
10320 LogFlowFunc(("pDisk=%#p nImage=%u pszFilename=%#p cbFilename=%u\n",
10321 pDisk, nImage, pszFilename, cbFilename));
10322 do
10323 {
10324 /* sanity check */
10325 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10326 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10327
10328 /* Check arguments. */
10329 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
10330 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
10331 rc = VERR_INVALID_PARAMETER);
10332 AssertMsgBreakStmt(cbFilename,
10333 ("cbFilename=%u\n", cbFilename),
10334 rc = VERR_INVALID_PARAMETER);
10335
10336 rc2 = vdThreadStartRead(pDisk);
10337 AssertRC(rc2);
10338 fLockRead = true;
10339
10340 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10341 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10342
10343 size_t cb = strlen(pImage->pszFilename);
10344 if (cb <= cbFilename)
10345 {
10346 strcpy(pszFilename, pImage->pszFilename);
10347 rc = VINF_SUCCESS;
10348 }
10349 else
10350 {
10351 strncpy(pszFilename, pImage->pszFilename, cbFilename - 1);
10352 pszFilename[cbFilename - 1] = '\0';
10353 rc = VERR_BUFFER_OVERFLOW;
10354 }
10355 } while (0);
10356
10357 if (RT_UNLIKELY(fLockRead))
10358 {
10359 rc2 = vdThreadFinishRead(pDisk);
10360 AssertRC(rc2);
10361 }
10362
10363 LogFlowFunc(("returns %Rrc, pszFilename=\"%s\"\n", rc, pszFilename));
10364 return rc;
10365}
10366
10367/**
10368 * Get the comment line of image in HDD container.
10369 *
10370 * @returns VBox status code.
10371 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10372 * @returns VERR_BUFFER_OVERFLOW if pszComment buffer too small to hold comment text.
10373 * @param pDisk Pointer to HDD container.
10374 * @param nImage Image number, counts from 0. 0 is always base image of container.
 * @param   pDisk           Pointer to HDD container.
 * @param   nImage          Image number, counts from 0. 0 is always base image of container.
 * @param   pszComment      Where to store the comment string of image. Not NULL.
 * @param   cbComment       The size of pszComment buffer. Must be non-zero.
10377 */
VBOXDDU_DECL(int) VDGetComment(PVBOXHDD pDisk, unsigned nImage,
                               char *pszComment, unsigned cbComment)
{
    int rc;
    int rc2;
    bool fLockRead = false; /* Set once the disk read lock is held. */

    LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p cbComment=%u\n",
                 pDisk, nImage, pszComment, cbComment));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. NOTE(review): despite what the API doc comment
         * says, a NULL pszComment or a zero cbComment is rejected here. */
        AssertMsgBreakStmt(VALID_PTR(pszComment),
                           ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbComment,
                           ("cbComment=%u\n", cbComment),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        /* The backend copies the comment (up to cbComment bytes) into the buffer. */
        rc = pImage->Backend->pfnGetComment(pImage->pBackendData, pszComment,
                                            cbComment);
    } while (0);

    if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc, pszComment=\"%s\"\n", rc, pszComment));
    return rc;
}
10421
10422/**
10423 * Changes the comment line of image in HDD container.
10424 *
10425 * @returns VBox status code.
10426 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10427 * @param pDisk Pointer to HDD container.
10428 * @param nImage Image number, counts from 0. 0 is always base image of container.
10429 * @param pszComment New comment string (UTF-8). NULL is allowed to reset the comment.
10430 */
10431VBOXDDU_DECL(int) VDSetComment(PVBOXHDD pDisk, unsigned nImage,
10432 const char *pszComment)
10433{
10434 int rc;
10435 int rc2;
10436 bool fLockWrite = false;
10437
10438 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p \"%s\"\n",
10439 pDisk, nImage, pszComment, pszComment));
10440 do
10441 {
10442 /* sanity check */
10443 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10444 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10445
10446 /* Check arguments. */
10447 AssertMsgBreakStmt(VALID_PTR(pszComment) || pszComment == NULL,
10448 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10449 rc = VERR_INVALID_PARAMETER);
10450
10451 rc2 = vdThreadStartWrite(pDisk);
10452 AssertRC(rc2);
10453 fLockWrite = true;
10454
10455 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10456 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10457
10458 rc = pImage->Backend->pfnSetComment(pImage->pBackendData, pszComment);
10459 } while (0);
10460
10461 if (RT_UNLIKELY(fLockWrite))
10462 {
10463 rc2 = vdThreadFinishWrite(pDisk);
10464 AssertRC(rc2);
10465 }
10466
10467 LogFlowFunc(("returns %Rrc\n", rc));
10468 return rc;
10469}
10470
10471
10472/**
10473 * Get UUID of image in HDD container.
10474 *
10475 * @returns VBox status code.
10476 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10477 * @param pDisk Pointer to HDD container.
10478 * @param nImage Image number, counts from 0. 0 is always base image of container.
10479 * @param pUuid Where to store the image creation UUID.
10480 */
10481VBOXDDU_DECL(int) VDGetUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10482{
10483 int rc;
10484 int rc2;
10485 bool fLockRead = false;
10486
10487 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10488 do
10489 {
10490 /* sanity check */
10491 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10492 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10493
10494 /* Check arguments. */
10495 AssertMsgBreakStmt(VALID_PTR(pUuid),
10496 ("pUuid=%#p\n", pUuid),
10497 rc = VERR_INVALID_PARAMETER);
10498
10499 rc2 = vdThreadStartRead(pDisk);
10500 AssertRC(rc2);
10501 fLockRead = true;
10502
10503 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10504 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10505
10506 rc = pImage->Backend->pfnGetUuid(pImage->pBackendData, pUuid);
10507 } while (0);
10508
10509 if (RT_UNLIKELY(fLockRead))
10510 {
10511 rc2 = vdThreadFinishRead(pDisk);
10512 AssertRC(rc2);
10513 }
10514
10515 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10516 return rc;
10517}
10518
10519/**
10520 * Set the image's UUID. Should not be used by normal applications.
10521 *
10522 * @returns VBox status code.
10523 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10524 * @param pDisk Pointer to HDD container.
10525 * @param nImage Image number, counts from 0. 0 is always base image of container.
10526 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
10527 */
10528VBOXDDU_DECL(int) VDSetUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10529{
10530 int rc;
10531 int rc2;
10532 bool fLockWrite = false;
10533
10534 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10535 pDisk, nImage, pUuid, pUuid));
10536 do
10537 {
10538 /* sanity check */
10539 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10540 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10541
10542 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10543 ("pUuid=%#p\n", pUuid),
10544 rc = VERR_INVALID_PARAMETER);
10545
10546 rc2 = vdThreadStartWrite(pDisk);
10547 AssertRC(rc2);
10548 fLockWrite = true;
10549
10550 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10551 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10552
10553 RTUUID Uuid;
10554 if (!pUuid)
10555 {
10556 RTUuidCreate(&Uuid);
10557 pUuid = &Uuid;
10558 }
10559 rc = pImage->Backend->pfnSetUuid(pImage->pBackendData, pUuid);
10560 } while (0);
10561
10562 if (RT_UNLIKELY(fLockWrite))
10563 {
10564 rc2 = vdThreadFinishWrite(pDisk);
10565 AssertRC(rc2);
10566 }
10567
10568 LogFlowFunc(("returns %Rrc\n", rc));
10569 return rc;
10570}
10571
10572/**
10573 * Get last modification UUID of image in HDD container.
10574 *
10575 * @returns VBox status code.
10576 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10577 * @param pDisk Pointer to HDD container.
10578 * @param nImage Image number, counts from 0. 0 is always base image of container.
10579 * @param pUuid Where to store the image modification UUID.
10580 */
10581VBOXDDU_DECL(int) VDGetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10582{
10583 int rc = VINF_SUCCESS;
10584 int rc2;
10585 bool fLockRead = false;
10586
10587 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10588 do
10589 {
10590 /* sanity check */
10591 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10592 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10593
10594 /* Check arguments. */
10595 AssertMsgBreakStmt(VALID_PTR(pUuid),
10596 ("pUuid=%#p\n", pUuid),
10597 rc = VERR_INVALID_PARAMETER);
10598
10599 rc2 = vdThreadStartRead(pDisk);
10600 AssertRC(rc2);
10601 fLockRead = true;
10602
10603 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10604 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10605
10606 rc = pImage->Backend->pfnGetModificationUuid(pImage->pBackendData,
10607 pUuid);
10608 } while (0);
10609
10610 if (RT_UNLIKELY(fLockRead))
10611 {
10612 rc2 = vdThreadFinishRead(pDisk);
10613 AssertRC(rc2);
10614 }
10615
10616 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10617 return rc;
10618}
10619
10620/**
10621 * Set the image's last modification UUID. Should not be used by normal applications.
10622 *
10623 * @returns VBox status code.
10624 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10625 * @param pDisk Pointer to HDD container.
10626 * @param nImage Image number, counts from 0. 0 is always base image of container.
10627 * @param pUuid New modification UUID of the image. If NULL, a new UUID is created.
10628 */
10629VBOXDDU_DECL(int) VDSetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10630{
10631 int rc;
10632 int rc2;
10633 bool fLockWrite = false;
10634
10635 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10636 pDisk, nImage, pUuid, pUuid));
10637 do
10638 {
10639 /* sanity check */
10640 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10641 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10642
10643 /* Check arguments. */
10644 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10645 ("pUuid=%#p\n", pUuid),
10646 rc = VERR_INVALID_PARAMETER);
10647
10648 rc2 = vdThreadStartWrite(pDisk);
10649 AssertRC(rc2);
10650 fLockWrite = true;
10651
10652 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10653 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10654
10655 RTUUID Uuid;
10656 if (!pUuid)
10657 {
10658 RTUuidCreate(&Uuid);
10659 pUuid = &Uuid;
10660 }
10661 rc = pImage->Backend->pfnSetModificationUuid(pImage->pBackendData,
10662 pUuid);
10663 } while (0);
10664
10665 if (RT_UNLIKELY(fLockWrite))
10666 {
10667 rc2 = vdThreadFinishWrite(pDisk);
10668 AssertRC(rc2);
10669 }
10670
10671 LogFlowFunc(("returns %Rrc\n", rc));
10672 return rc;
10673}
10674
10675/**
10676 * Get parent UUID of image in HDD container.
10677 *
10678 * @returns VBox status code.
10679 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10680 * @param pDisk Pointer to HDD container.
10681 * @param nImage Image number, counts from 0. 0 is always base image of container.
10682 * @param pUuid Where to store the parent image UUID.
10683 */
10684VBOXDDU_DECL(int) VDGetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10685 PRTUUID pUuid)
10686{
10687 int rc = VINF_SUCCESS;
10688 int rc2;
10689 bool fLockRead = false;
10690
10691 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10692 do
10693 {
10694 /* sanity check */
10695 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10696 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10697
10698 /* Check arguments. */
10699 AssertMsgBreakStmt(VALID_PTR(pUuid),
10700 ("pUuid=%#p\n", pUuid),
10701 rc = VERR_INVALID_PARAMETER);
10702
10703 rc2 = vdThreadStartRead(pDisk);
10704 AssertRC(rc2);
10705 fLockRead = true;
10706
10707 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10708 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10709
10710 rc = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, pUuid);
10711 } while (0);
10712
10713 if (RT_UNLIKELY(fLockRead))
10714 {
10715 rc2 = vdThreadFinishRead(pDisk);
10716 AssertRC(rc2);
10717 }
10718
10719 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10720 return rc;
10721}
10722
10723/**
10724 * Set the image's parent UUID. Should not be used by normal applications.
10725 *
10726 * @returns VBox status code.
10727 * @param pDisk Pointer to HDD container.
10728 * @param nImage Image number, counts from 0. 0 is always base image of container.
10729 * @param pUuid New parent UUID of the image. If NULL, a new UUID is created.
10730 */
10731VBOXDDU_DECL(int) VDSetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10732 PCRTUUID pUuid)
10733{
10734 int rc;
10735 int rc2;
10736 bool fLockWrite = false;
10737
10738 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10739 pDisk, nImage, pUuid, pUuid));
10740 do
10741 {
10742 /* sanity check */
10743 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10744 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10745
10746 /* Check arguments. */
10747 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10748 ("pUuid=%#p\n", pUuid),
10749 rc = VERR_INVALID_PARAMETER);
10750
10751 rc2 = vdThreadStartWrite(pDisk);
10752 AssertRC(rc2);
10753 fLockWrite = true;
10754
10755 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10756 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10757
10758 RTUUID Uuid;
10759 if (!pUuid)
10760 {
10761 RTUuidCreate(&Uuid);
10762 pUuid = &Uuid;
10763 }
10764 rc = pImage->Backend->pfnSetParentUuid(pImage->pBackendData, pUuid);
10765 } while (0);
10766
10767 if (RT_UNLIKELY(fLockWrite))
10768 {
10769 rc2 = vdThreadFinishWrite(pDisk);
10770 AssertRC(rc2);
10771 }
10772
10773 LogFlowFunc(("returns %Rrc\n", rc));
10774 return rc;
10775}
10776
10777
10778/**
10779 * Debug helper - dumps all opened images in HDD container into the log file.
10780 *
10781 * @param pDisk Pointer to HDD container.
10782 */
10783VBOXDDU_DECL(void) VDDumpImages(PVBOXHDD pDisk)
10784{
10785 int rc2;
10786 bool fLockRead = false;
10787
10788 do
10789 {
10790 /* sanity check */
10791 AssertPtrBreak(pDisk);
10792 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10793
10794 if (!pDisk->pInterfaceError || !VALID_PTR(pDisk->pInterfaceError->pfnMessage))
10795 pDisk->pInterfaceError->pfnMessage = vdLogMessage;
10796
10797 rc2 = vdThreadStartRead(pDisk);
10798 AssertRC(rc2);
10799 fLockRead = true;
10800
10801 vdMessageWrapper(pDisk, "--- Dumping VD Disk, Images=%u\n", pDisk->cImages);
10802 for (PVDIMAGE pImage = pDisk->pBase; pImage; pImage = pImage->pNext)
10803 {
10804 vdMessageWrapper(pDisk, "Dumping VD image \"%s\" (Backend=%s)\n",
10805 pImage->pszFilename, pImage->Backend->pszBackendName);
10806 pImage->Backend->pfnDump(pImage->pBackendData);
10807 }
10808 } while (0);
10809
10810 if (RT_UNLIKELY(fLockRead))
10811 {
10812 rc2 = vdThreadFinishRead(pDisk);
10813 AssertRC(rc2);
10814 }
10815}
10816
10817
10818VBOXDDU_DECL(int) VDDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges)
10819{
10820 int rc;
10821 int rc2;
10822 bool fLockWrite = false;
10823
10824 LogFlowFunc(("pDisk=%#p paRanges=%#p cRanges=%u\n",
10825 pDisk, paRanges, cRanges));
10826 do
10827 {
10828 /* sanity check */
10829 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10830 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10831
10832 /* Check arguments. */
10833 AssertMsgBreakStmt(cRanges,
10834 ("cRanges=%u\n", cRanges),
10835 rc = VERR_INVALID_PARAMETER);
10836 AssertMsgBreakStmt(VALID_PTR(paRanges),
10837 ("paRanges=%#p\n", paRanges),
10838 rc = VERR_INVALID_PARAMETER);
10839
10840 rc2 = vdThreadStartWrite(pDisk);
10841 AssertRC(rc2);
10842 fLockWrite = true;
10843
10844 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
10845
10846 AssertMsgBreakStmt(pDisk->pLast->uOpenFlags & VD_OPEN_FLAGS_DISCARD,
10847 ("Discarding not supported\n"),
10848 rc = VERR_NOT_SUPPORTED);
10849
10850 VDIOCTX IoCtx;
10851 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
10852
10853 rc = RTSemEventCreate(&hEventComplete);
10854 if (RT_FAILURE(rc))
10855 break;
10856
10857 vdIoCtxDiscardInit(&IoCtx, pDisk, paRanges, cRanges,
10858 vdIoCtxSyncComplete, pDisk, hEventComplete, NULL,
10859 vdDiscardHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
10860 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
10861
10862 RTSemEventDestroy(hEventComplete);
10863 } while (0);
10864
10865 if (RT_UNLIKELY(fLockWrite))
10866 {
10867 rc2 = vdThreadFinishWrite(pDisk);
10868 AssertRC(rc2);
10869 }
10870
10871 LogFlowFunc(("returns %Rrc\n", rc));
10872 return rc;
10873}
10874
10875
10876VBOXDDU_DECL(int) VDAsyncRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
10877 PCRTSGBUF pcSgBuf,
10878 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
10879 void *pvUser1, void *pvUser2)
10880{
10881 int rc = VERR_VD_BLOCK_FREE;
10882 int rc2;
10883 bool fLockRead = false;
10884 PVDIOCTX pIoCtx = NULL;
10885
10886 LogFlowFunc(("pDisk=%#p uOffset=%llu pcSgBuf=%#p cbRead=%zu pvUser1=%#p pvUser2=%#p\n",
10887 pDisk, uOffset, pcSgBuf, cbRead, pvUser1, pvUser2));
10888
10889 do
10890 {
10891 /* sanity check */
10892 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10893 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10894
10895 /* Check arguments. */
10896 AssertMsgBreakStmt(cbRead,
10897 ("cbRead=%zu\n", cbRead),
10898 rc = VERR_INVALID_PARAMETER);
10899 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
10900 ("pcSgBuf=%#p\n", pcSgBuf),
10901 rc = VERR_INVALID_PARAMETER);
10902
10903 rc2 = vdThreadStartRead(pDisk);
10904 AssertRC(rc2);
10905 fLockRead = true;
10906
10907 AssertMsgBreakStmt(uOffset + cbRead <= pDisk->cbSize,
10908 ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
10909 uOffset, cbRead, pDisk->cbSize),
10910 rc = VERR_INVALID_PARAMETER);
10911 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
10912
10913 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_READ, uOffset,
10914 cbRead, pDisk->pLast, pcSgBuf,
10915 pfnComplete, pvUser1, pvUser2,
10916 NULL, vdReadHelperAsync,
10917 VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
10918 if (!pIoCtx)
10919 {
10920 rc = VERR_NO_MEMORY;
10921 break;
10922 }
10923
10924 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
10925 if (rc == VINF_VD_ASYNC_IO_FINISHED)
10926 {
10927 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
10928 vdIoCtxFree(pDisk, pIoCtx);
10929 else
10930 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
10931 }
10932 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
10933 vdIoCtxFree(pDisk, pIoCtx);
10934
10935 } while (0);
10936
10937 if (RT_UNLIKELY(fLockRead) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
10938 {
10939 rc2 = vdThreadFinishRead(pDisk);
10940 AssertRC(rc2);
10941 }
10942
10943 LogFlowFunc(("returns %Rrc\n", rc));
10944 return rc;
10945}
10946
10947
10948VBOXDDU_DECL(int) VDAsyncWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
10949 PCRTSGBUF pcSgBuf,
10950 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
10951 void *pvUser1, void *pvUser2)
10952{
10953 int rc;
10954 int rc2;
10955 bool fLockWrite = false;
10956 PVDIOCTX pIoCtx = NULL;
10957
10958 LogFlowFunc(("pDisk=%#p uOffset=%llu cSgBuf=%#p cbWrite=%zu pvUser1=%#p pvUser2=%#p\n",
10959 pDisk, uOffset, pcSgBuf, cbWrite, pvUser1, pvUser2));
10960 do
10961 {
10962 /* sanity check */
10963 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10964 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10965
10966 /* Check arguments. */
10967 AssertMsgBreakStmt(cbWrite,
10968 ("cbWrite=%zu\n", cbWrite),
10969 rc = VERR_INVALID_PARAMETER);
10970 AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
10971 ("pcSgBuf=%#p\n", pcSgBuf),
10972 rc = VERR_INVALID_PARAMETER);
10973
10974 rc2 = vdThreadStartWrite(pDisk);
10975 AssertRC(rc2);
10976 fLockWrite = true;
10977
10978 AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
10979 ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
10980 uOffset, cbWrite, pDisk->cbSize),
10981 rc = VERR_INVALID_PARAMETER);
10982 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
10983
10984 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_WRITE, uOffset,
10985 cbWrite, pDisk->pLast, pcSgBuf,
10986 pfnComplete, pvUser1, pvUser2,
10987 NULL, vdWriteHelperAsync,
10988 VDIOCTX_FLAGS_DEFAULT);
10989 if (!pIoCtx)
10990 {
10991 rc = VERR_NO_MEMORY;
10992 break;
10993 }
10994
10995 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
10996 if (rc == VINF_VD_ASYNC_IO_FINISHED)
10997 {
10998 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
10999 vdIoCtxFree(pDisk, pIoCtx);
11000 else
11001 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11002 }
11003 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11004 vdIoCtxFree(pDisk, pIoCtx);
11005 } while (0);
11006
11007 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11008 {
11009 rc2 = vdThreadFinishWrite(pDisk);
11010 AssertRC(rc2);
11011 }
11012
11013 LogFlowFunc(("returns %Rrc\n", rc));
11014 return rc;
11015}
11016
11017
11018VBOXDDU_DECL(int) VDAsyncFlush(PVBOXHDD pDisk, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11019 void *pvUser1, void *pvUser2)
11020{
11021 int rc;
11022 int rc2;
11023 bool fLockWrite = false;
11024 PVDIOCTX pIoCtx = NULL;
11025
11026 LogFlowFunc(("pDisk=%#p\n", pDisk));
11027
11028 do
11029 {
11030 /* sanity check */
11031 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11032 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11033
11034 rc2 = vdThreadStartWrite(pDisk);
11035 AssertRC(rc2);
11036 fLockWrite = true;
11037
11038 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11039
11040 pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_FLUSH, 0,
11041 0, pDisk->pLast, NULL,
11042 pfnComplete, pvUser1, pvUser2,
11043 NULL, vdFlushHelperAsync,
11044 VDIOCTX_FLAGS_DEFAULT);
11045 if (!pIoCtx)
11046 {
11047 rc = VERR_NO_MEMORY;
11048 break;
11049 }
11050
11051 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11052 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11053 {
11054 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11055 vdIoCtxFree(pDisk, pIoCtx);
11056 else
11057 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11058 }
11059 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11060 vdIoCtxFree(pDisk, pIoCtx);
11061 } while (0);
11062
11063 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11064 {
11065 rc2 = vdThreadFinishWrite(pDisk);
11066 AssertRC(rc2);
11067 }
11068
11069 LogFlowFunc(("returns %Rrc\n", rc));
11070 return rc;
11071}
11072
11073VBOXDDU_DECL(int) VDAsyncDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges,
11074 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11075 void *pvUser1, void *pvUser2)
11076{
11077 int rc;
11078 int rc2;
11079 bool fLockWrite = false;
11080 PVDIOCTX pIoCtx = NULL;
11081
11082 LogFlowFunc(("pDisk=%#p\n", pDisk));
11083
11084 do
11085 {
11086 /* sanity check */
11087 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11088 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11089
11090 rc2 = vdThreadStartWrite(pDisk);
11091 AssertRC(rc2);
11092 fLockWrite = true;
11093
11094 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11095
11096 pIoCtx = vdIoCtxDiscardAlloc(pDisk, paRanges, cRanges,
11097 pfnComplete, pvUser1, pvUser2, NULL,
11098 vdDiscardHelperAsync,
11099 VDIOCTX_FLAGS_DEFAULT);
11100 if (!pIoCtx)
11101 {
11102 rc = VERR_NO_MEMORY;
11103 break;
11104 }
11105
11106 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11107 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11108 {
11109 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11110 vdIoCtxFree(pDisk, pIoCtx);
11111 else
11112 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11113 }
11114 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11115 vdIoCtxFree(pDisk, pIoCtx);
11116 } while (0);
11117
11118 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11119 {
11120 rc2 = vdThreadFinishWrite(pDisk);
11121 AssertRC(rc2);
11122 }
11123
11124 LogFlowFunc(("returns %Rrc\n", rc));
11125 return rc;
11126}
11127
11128VBOXDDU_DECL(int) VDRepair(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
11129 const char *pszFilename, const char *pszBackend,
11130 uint32_t fFlags)
11131{
11132 int rc = VERR_NOT_SUPPORTED;
11133 PCVDIMAGEBACKEND pBackend = NULL;
11134 VDINTERFACEIOINT VDIfIoInt;
11135 VDINTERFACEIO VDIfIoFallback;
11136 PVDINTERFACEIO pInterfaceIo;
11137
11138 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
11139 /* Check arguments. */
11140 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
11141 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
11142 VERR_INVALID_PARAMETER);
11143 AssertMsgReturn(VALID_PTR(pszBackend),
11144 ("pszBackend=%#p\n", pszBackend),
11145 VERR_INVALID_PARAMETER);
11146 AssertMsgReturn((fFlags & ~VD_REPAIR_FLAGS_MASK) == 0,
11147 ("fFlags=%#x\n", fFlags),
11148 VERR_INVALID_PARAMETER);
11149
11150 pInterfaceIo = VDIfIoGet(pVDIfsImage);
11151 if (!pInterfaceIo)
11152 {
11153 /*
11154 * Caller doesn't provide an I/O interface, create our own using the
11155 * native file API.
11156 */
11157 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
11158 pInterfaceIo = &VDIfIoFallback;
11159 }
11160
11161 /* Set up the internal I/O interface. */
11162 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
11163 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
11164 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
11165 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
11166 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
11167 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
11168 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
11169 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
11170 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
11171 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
11172 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
11173 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
11174 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
11175 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
11176 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
11177 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
11178 AssertRC(rc);
11179
11180 rc = vdFindBackend(pszBackend, &pBackend);
11181 if (RT_SUCCESS(rc))
11182 {
11183 if (pBackend->pfnRepair)
11184 rc = pBackend->pfnRepair(pszFilename, pVDIfsDisk, pVDIfsImage, fFlags);
11185 else
11186 rc = VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED;
11187 }
11188
11189 LogFlowFunc(("returns %Rrc\n", rc));
11190 return rc;
11191}
11192
11193
11194/*
11195 * generic plugin functions
11196 */
11197
11198/**
11199 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeLocation}
11200 */
11201DECLCALLBACK(int) genericFileComposeLocation(PVDINTERFACE pConfig, char **pszLocation)
11202{
11203 RT_NOREF1(pConfig);
11204 *pszLocation = NULL;
11205 return VINF_SUCCESS;
11206}
11207
11208/**
11209 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeName}
11210 */
11211DECLCALLBACK(int) genericFileComposeName(PVDINTERFACE pConfig, char **pszName)
11212{
11213 RT_NOREF1(pConfig);
11214 *pszName = NULL;
11215 return VINF_SUCCESS;
11216}
11217
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette