VirtualBox

source: vbox/trunk/src/VBox/Storage/VD.cpp@ 64711

Last change on this file since 64711 was 64711, checked in by vboxsync, 8 years ago

VD: Check that the size is aligned to a 512 byte sector boundary

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 397.1 KB
Line 
1/* $Id: VD.cpp 64711 2016-11-18 11:45:00Z vboxsync $ */
2/** @file
3 * VBoxHDD - VBox HDD Container implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD
23#include <VBox/vd.h>
24#include <VBox/err.h>
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#include <iprt/alloc.h>
29#include <iprt/assert.h>
30#include <iprt/uuid.h>
31#include <iprt/file.h>
32#include <iprt/string.h>
33#include <iprt/asm.h>
34#include <iprt/ldr.h>
35#include <iprt/dir.h>
36#include <iprt/path.h>
37#include <iprt/param.h>
38#include <iprt/memcache.h>
39#include <iprt/sg.h>
40#include <iprt/list.h>
41#include <iprt/avl.h>
42#include <iprt/semaphore.h>
43
44#include <VBox/vd-plugin.h>
45
46#include "VDBackends.h"
47
48/** Disable dynamic backends on non x86 architectures. This feature
49 * requires the SUPR3 library which is not available there.
50 */
51#if !defined(VBOX_HDD_NO_DYNAMIC_BACKENDS) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)
52# define VBOX_HDD_NO_DYNAMIC_BACKENDS
53#endif
54
55#define VBOXHDDDISK_SIGNATURE 0x6f0e2a7d
56
57/** Buffer size used for merging images. */
58#define VD_MERGE_BUFFER_SIZE (16 * _1M)
59
60/** Maximum number of segments in one I/O task. */
61#define VD_IO_TASK_SEGMENTS_MAX 64
62
63/** Threshold after not recently used blocks are removed from the list. */
64#define VD_DISCARD_REMOVE_THRESHOLD (10 * _1M) /** @todo experiment */
65
66/**
67 * VD async I/O interface storage descriptor.
68 */
69typedef struct VDIIOFALLBACKSTORAGE
70{
71 /** File handle. */
72 RTFILE File;
73 /** Completion callback. */
74 PFNVDCOMPLETED pfnCompleted;
75 /** Thread for async access. */
76 RTTHREAD ThreadAsync;
77} VDIIOFALLBACKSTORAGE, *PVDIIOFALLBACKSTORAGE;
78
79/**
80 * Structure containing everything I/O related
81 * for the image and cache descriptors.
82 */
83typedef struct VDIO
84{
85 /** I/O interface to the upper layer. */
86 PVDINTERFACEIO pInterfaceIo;
87
88 /** Per image internal I/O interface. */
89 VDINTERFACEIOINT VDIfIoInt;
90
91 /** Fallback I/O interface, only used if the caller doesn't provide it. */
92 VDINTERFACEIO VDIfIo;
93
94 /** Opaque backend data. */
95 void *pBackendData;
96 /** Disk this image is part of */
97 PVBOXHDD pDisk;
98 /** Flag whether to ignore flush requests. */
99 bool fIgnoreFlush;
100} VDIO, *PVDIO;
101
102/** Forward declaration of an I/O task */
103typedef struct VDIOTASK *PVDIOTASK;
104
105/**
106 * VBox HDD Container image descriptor.
107 */
108typedef struct VDIMAGE
109{
110 /** Link to parent image descriptor, if any. */
111 struct VDIMAGE *pPrev;
112 /** Link to child image descriptor, if any. */
113 struct VDIMAGE *pNext;
114 /** Container base filename. (UTF-8) */
115 char *pszFilename;
116 /** Data managed by the backend which keeps the actual info. */
117 void *pBackendData;
118 /** Cached sanitized image flags. */
119 unsigned uImageFlags;
120 /** Image open flags (only those handled generically in this code and which
121 * the backends will never ever see). */
122 unsigned uOpenFlags;
123
124 /** Function pointers for the various backend methods. */
125 PCVDIMAGEBACKEND Backend;
126 /** Pointer to list of VD interfaces, per-image. */
127 PVDINTERFACE pVDIfsImage;
128 /** I/O related things. */
129 VDIO VDIo;
130} VDIMAGE, *PVDIMAGE;
131
132/**
133 * uModified bit flags.
134 */
135#define VD_IMAGE_MODIFIED_FLAG RT_BIT(0)
136#define VD_IMAGE_MODIFIED_FIRST RT_BIT(1)
137#define VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE RT_BIT(2)
138
139
140/**
141 * VBox HDD Cache image descriptor.
142 */
143typedef struct VDCACHE
144{
145 /** Cache base filename. (UTF-8) */
146 char *pszFilename;
147 /** Data managed by the backend which keeps the actual info. */
148 void *pBackendData;
149 /** Cached sanitized image flags. */
150 unsigned uImageFlags;
151 /** Image open flags (only those handled generically in this code and which
152 * the backends will never ever see). */
153 unsigned uOpenFlags;
154
155 /** Function pointers for the various backend methods. */
156 PCVDCACHEBACKEND Backend;
157
158 /** Pointer to list of VD interfaces, per-cache. */
159 PVDINTERFACE pVDIfsCache;
160 /** I/O related things. */
161 VDIO VDIo;
162} VDCACHE, *PVDCACHE;
163
164/**
165 * A block waiting for a discard.
166 */
167typedef struct VDDISCARDBLOCK
168{
169 /** AVL core. */
170 AVLRU64NODECORE Core;
171 /** LRU list node. */
172 RTLISTNODE NodeLru;
173 /** Number of bytes to discard. */
174 size_t cbDiscard;
175 /** Bitmap of allocated sectors. */
176 void *pbmAllocated;
177} VDDISCARDBLOCK, *PVDDISCARDBLOCK;
178
179/**
180 * VD discard state.
181 */
182typedef struct VDDISCARDSTATE
183{
184 /** Number of bytes waiting for a discard. */
185 size_t cbDiscarding;
186 /** AVL tree with blocks waiting for a discard.
187 * The uOffset + cbDiscard range is the search key. */
188 PAVLRU64TREE pTreeBlocks;
189 /** LRU list of the least frequently discarded blocks.
 190 * If there are too many blocks waiting the least frequently used
191 * will be removed and the range will be set to 0.
192 */
193 RTLISTNODE ListLru;
194} VDDISCARDSTATE, *PVDDISCARDSTATE;
195
196/**
197 * VD filter instance.
198 */
199typedef struct VDFILTER
200{
201 /** List node for the read filter chain. */
202 RTLISTNODE ListNodeChainRead;
203 /** List node for the write filter chain. */
204 RTLISTNODE ListNodeChainWrite;
205 /** Number of references to this filter. */
206 uint32_t cRefs;
207 /** Opaque VD filter backend instance data. */
208 void *pvBackendData;
209 /** Pointer to the filter backend interface. */
210 PCVDFILTERBACKEND pBackend;
211 /** Pointer to list of VD interfaces, per-filter. */
212 PVDINTERFACE pVDIfsFilter;
213 /** I/O related things. */
214 VDIO VDIo;
215} VDFILTER;
216/** Pointer to a VD filter instance. */
217typedef VDFILTER *PVDFILTER;
218
219/**
220 * VBox HDD Container main structure, private part.
221 */
222struct VBOXHDD
223{
224 /** Structure signature (VBOXHDDDISK_SIGNATURE). */
225 uint32_t u32Signature;
226
227 /** Image type. */
228 VDTYPE enmType;
229
230 /** Number of opened images. */
231 unsigned cImages;
232
233 /** Base image. */
234 PVDIMAGE pBase;
235
236 /** Last opened image in the chain.
237 * The same as pBase if only one image is used. */
238 PVDIMAGE pLast;
239
240 /** If a merge to one of the parents is running this may be non-NULL
241 * to indicate to what image the writes should be additionally relayed. */
242 PVDIMAGE pImageRelay;
243
244 /** Flags representing the modification state. */
245 unsigned uModified;
246
247 /** Cached size of this disk. */
248 uint64_t cbSize;
249 /** Cached PCHS geometry for this disk. */
250 VDGEOMETRY PCHSGeometry;
251 /** Cached LCHS geometry for this disk. */
252 VDGEOMETRY LCHSGeometry;
253
254 /** Pointer to list of VD interfaces, per-disk. */
255 PVDINTERFACE pVDIfsDisk;
256 /** Pointer to the common interface structure for error reporting. */
257 PVDINTERFACEERROR pInterfaceError;
258 /** Pointer to the optional thread synchronization callbacks. */
259 PVDINTERFACETHREADSYNC pInterfaceThreadSync;
260
261 /** Memory cache for I/O contexts */
262 RTMEMCACHE hMemCacheIoCtx;
263 /** Memory cache for I/O tasks. */
264 RTMEMCACHE hMemCacheIoTask;
265 /** An I/O context is currently using the disk structures
266 * Every I/O context must be placed on one of the lists below. */
267 volatile bool fLocked;
268 /** Head of pending I/O tasks waiting for completion - LIFO order. */
269 volatile PVDIOTASK pIoTasksPendingHead;
270 /** Head of newly queued I/O contexts - LIFO order. */
271 volatile PVDIOCTX pIoCtxHead;
272 /** Head of halted I/O contexts which are given back to generic
273 * disk framework by the backend. - LIFO order. */
274 volatile PVDIOCTX pIoCtxHaltedHead;
275
276 /** Head of blocked I/O contexts, processed only
277 * after pIoCtxLockOwner was freed - LIFO order. */
278 volatile PVDIOCTX pIoCtxBlockedHead;
279 /** I/O context which locked the disk for a growing write or flush request.
280 * Other flush or growing write requests need to wait until
281 * the current one completes. - NIL_VDIOCTX if unlocked. */
282 volatile PVDIOCTX pIoCtxLockOwner;
283 /** If the disk was locked by a growing write, flush or discard request this
284 * contains the start offset to check for interfering I/O while it is in progress. */
285 uint64_t uOffsetStartLocked;
286 /** If the disk was locked by a growing write, flush or discard request this contains
287 * the first non affected offset to check for interfering I/O while it is in progress. */
288 uint64_t uOffsetEndLocked;
289
290 /** Pointer to the L2 disk cache if any. */
291 PVDCACHE pCache;
292 /** Pointer to the discard state if any. */
293 PVDDISCARDSTATE pDiscard;
294
295 /** Read filter chain - PVDFILTER. */
296 RTLISTANCHOR ListFilterChainRead;
297 /** Write filter chain - PVDFILTER. */
298 RTLISTANCHOR ListFilterChainWrite;
299};
300
301# define VD_IS_LOCKED(a_pDisk) \
302 do \
303 { \
304 NOREF(a_pDisk); \
305 AssertMsg((a_pDisk)->fLocked, \
306 ("Lock not held\n"));\
307 } while(0)
308
309/**
310 * VBox parent read descriptor, used internally for compaction.
311 */
312typedef struct VDPARENTSTATEDESC
313{
314 /** Pointer to disk descriptor. */
315 PVBOXHDD pDisk;
316 /** Pointer to image descriptor. */
317 PVDIMAGE pImage;
318} VDPARENTSTATEDESC, *PVDPARENTSTATEDESC;
319
320/**
321 * Transfer direction.
322 */
323typedef enum VDIOCTXTXDIR
324{
325 /** Read */
326 VDIOCTXTXDIR_READ = 0,
327 /** Write */
328 VDIOCTXTXDIR_WRITE,
329 /** Flush */
330 VDIOCTXTXDIR_FLUSH,
331 /** Discard */
332 VDIOCTXTXDIR_DISCARD,
333 /** 32bit hack */
334 VDIOCTXTXDIR_32BIT_HACK = 0x7fffffff
335} VDIOCTXTXDIR, *PVDIOCTXTXDIR;
336
337/** Transfer function */
338typedef DECLCALLBACK(int) FNVDIOCTXTRANSFER (PVDIOCTX pIoCtx);
339/** Pointer to a transfer function. */
340typedef FNVDIOCTXTRANSFER *PFNVDIOCTXTRANSFER;
341
342/**
343 * I/O context
344 */
345typedef struct VDIOCTX
346{
347 /** Pointer to the next I/O context. */
348 struct VDIOCTX * volatile pIoCtxNext;
 349 /** Disk this request is for. */
350 PVBOXHDD pDisk;
351 /** Return code. */
352 int rcReq;
353 /** Various flags for the I/O context. */
354 uint32_t fFlags;
355 /** Number of data transfers currently pending. */
356 volatile uint32_t cDataTransfersPending;
357 /** How many meta data transfers are pending. */
358 volatile uint32_t cMetaTransfersPending;
359 /** Flag whether the request finished */
360 volatile bool fComplete;
361 /** Temporary allocated memory which is freed
362 * when the context completes. */
363 void *pvAllocation;
364 /** Transfer function. */
365 PFNVDIOCTXTRANSFER pfnIoCtxTransfer;
366 /** Next transfer part after the current one completed. */
367 PFNVDIOCTXTRANSFER pfnIoCtxTransferNext;
368 /** Transfer direction */
369 VDIOCTXTXDIR enmTxDir;
370 /** Request type dependent data. */
371 union
372 {
373 /** I/O request (read/write). */
374 struct
375 {
376 /** Number of bytes left until this context completes. */
377 volatile uint32_t cbTransferLeft;
378 /** Current offset */
379 volatile uint64_t uOffset;
380 /** Number of bytes to transfer */
381 volatile size_t cbTransfer;
382 /** Current image in the chain. */
383 PVDIMAGE pImageCur;
384 /** Start image to read from. pImageCur is reset to this
385 * value after it reached the first image in the chain. */
386 PVDIMAGE pImageStart;
387 /** S/G buffer */
388 RTSGBUF SgBuf;
389 /** Number of bytes to clear in the buffer before the current read. */
390 size_t cbBufClear;
391 /** Number of images to read. */
392 unsigned cImagesRead;
393 /** Override for the parent image to start reading from. */
394 PVDIMAGE pImageParentOverride;
395 /** Original offset of the transfer - required for filtering read requests. */
396 uint64_t uOffsetXferOrig;
 397 /** Original size of the transfer - required for filtering read requests. */
398 size_t cbXferOrig;
399 } Io;
400 /** Discard requests. */
401 struct
402 {
403 /** Pointer to the range descriptor array. */
404 PCRTRANGE paRanges;
405 /** Number of ranges in the array. */
406 unsigned cRanges;
407 /** Range descriptor index which is processed. */
408 unsigned idxRange;
409 /** Start offset to discard currently. */
410 uint64_t offCur;
411 /** How many bytes left to discard in the current range. */
412 size_t cbDiscardLeft;
413 /** How many bytes to discard in the current block (<= cbDiscardLeft). */
414 size_t cbThisDiscard;
415 /** Discard block handled currently. */
416 PVDDISCARDBLOCK pBlock;
417 } Discard;
418 } Req;
419 /** Parent I/O context if any. Sets the type of the context (root/child) */
420 PVDIOCTX pIoCtxParent;
421 /** Type dependent data (root/child) */
422 union
423 {
424 /** Root data */
425 struct
426 {
427 /** Completion callback */
428 PFNVDASYNCTRANSFERCOMPLETE pfnComplete;
429 /** User argument 1 passed on completion. */
430 void *pvUser1;
431 /** User argument 2 passed on completion. */
432 void *pvUser2;
433 } Root;
434 /** Child data */
435 struct
436 {
437 /** Saved start offset */
438 uint64_t uOffsetSaved;
439 /** Saved transfer size */
440 size_t cbTransferLeftSaved;
441 /** Number of bytes transferred from the parent if this context completes. */
442 size_t cbTransferParent;
443 /** Number of bytes to pre read */
444 size_t cbPreRead;
445 /** Number of bytes to post read. */
446 size_t cbPostRead;
447 /** Number of bytes to write left in the parent. */
448 size_t cbWriteParent;
449 /** Write type dependent data. */
450 union
451 {
452 /** Optimized */
453 struct
454 {
455 /** Bytes to fill to satisfy the block size. Not part of the virtual disk. */
456 size_t cbFill;
457 /** Bytes to copy instead of reading from the parent */
458 size_t cbWriteCopy;
459 /** Bytes to read from the image. */
460 size_t cbReadImage;
461 } Optimized;
462 } Write;
463 } Child;
464 } Type;
465} VDIOCTX;
466
467/** Default flags for an I/O context, i.e. unblocked and async. */
468#define VDIOCTX_FLAGS_DEFAULT (0)
469/** Flag whether the context is blocked. */
470#define VDIOCTX_FLAGS_BLOCKED RT_BIT_32(0)
471/** Flag whether the I/O context is using synchronous I/O. */
472#define VDIOCTX_FLAGS_SYNC RT_BIT_32(1)
473/** Flag whether the read should update the cache. */
474#define VDIOCTX_FLAGS_READ_UPDATE_CACHE RT_BIT_32(2)
475/** Flag whether free blocks should be zeroed.
 476 * If false and no image has data for specified
477 * range VERR_VD_BLOCK_FREE is returned for the I/O context.
478 * Note that unallocated blocks are still zeroed
479 * if at least one image has valid data for a part
480 * of the range.
481 */
482#define VDIOCTX_FLAGS_ZERO_FREE_BLOCKS RT_BIT_32(3)
483/** Don't free the I/O context when complete because
 484 * it was allocated elsewhere (stack, ...). */
485#define VDIOCTX_FLAGS_DONT_FREE RT_BIT_32(4)
486/** Don't set the modified flag for this I/O context when writing. */
487#define VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG RT_BIT_32(5)
488/** The write filter was applied already and shouldn't be applied a second time.
489 * Used at the beginning of vdWriteHelperAsync() because it might be called
490 * multiple times.
491 */
492#define VDIOCTX_FLAGS_WRITE_FILTER_APPLIED RT_BIT_32(6)
493
494/** NIL I/O context pointer value. */
495#define NIL_VDIOCTX ((PVDIOCTX)0)
496
497/**
498 * List node for deferred I/O contexts.
499 */
500typedef struct VDIOCTXDEFERRED
501{
502 /** Node in the list of deferred requests.
503 * A request can be deferred if the image is growing
504 * and the request accesses the same range or if
505 * the backend needs to read or write metadata from the disk
506 * before it can continue. */
507 RTLISTNODE NodeDeferred;
508 /** I/O context this entry points to. */
509 PVDIOCTX pIoCtx;
510} VDIOCTXDEFERRED, *PVDIOCTXDEFERRED;
511
512/**
513 * I/O task.
514 */
515typedef struct VDIOTASK
516{
517 /** Next I/O task waiting in the list. */
518 struct VDIOTASK * volatile pNext;
519 /** Storage this task belongs to. */
520 PVDIOSTORAGE pIoStorage;
521 /** Optional completion callback. */
522 PFNVDXFERCOMPLETED pfnComplete;
523 /** Opaque user data. */
524 void *pvUser;
525 /** Completion status code for the task. */
526 int rcReq;
527 /** Flag whether this is a meta data transfer. */
528 bool fMeta;
529 /** Type dependent data. */
530 union
531 {
532 /** User data transfer. */
533 struct
534 {
535 /** Number of bytes this task transferred. */
536 uint32_t cbTransfer;
537 /** Pointer to the I/O context the task belongs. */
538 PVDIOCTX pIoCtx;
539 } User;
540 /** Meta data transfer. */
541 struct
542 {
543 /** Meta transfer this task is for. */
544 PVDMETAXFER pMetaXfer;
545 } Meta;
546 } Type;
547} VDIOTASK;
548
549/**
550 * Storage handle.
551 */
552typedef struct VDIOSTORAGE
553{
554 /** Image I/O state this storage handle belongs to. */
555 PVDIO pVDIo;
556 /** AVL tree for pending async metadata transfers. */
557 PAVLRFOFFTREE pTreeMetaXfers;
558 /** Storage handle */
559 void *pStorage;
560} VDIOSTORAGE;
561
562/**
563 * Metadata transfer.
564 *
565 * @note This entry can't be freed if either the list is not empty or
566 * the reference counter is not 0.
567 * The assumption is that the backends don't need to read huge amounts of
568 * metadata to complete a transfer so the additional memory overhead should
569 * be relatively small.
570 */
571typedef struct VDMETAXFER
572{
573 /** AVL core for fast search (the file offset is the key) */
574 AVLRFOFFNODECORE Core;
575 /** I/O storage for this transfer. */
576 PVDIOSTORAGE pIoStorage;
577 /** Flags. */
578 uint32_t fFlags;
579 /** List of I/O contexts waiting for this metadata transfer to complete. */
580 RTLISTNODE ListIoCtxWaiting;
581 /** Number of references to this entry. */
582 unsigned cRefs;
583 /** Size of the data stored with this entry. */
584 size_t cbMeta;
585 /** Shadow buffer which is used in case a write is still active and other
586 * writes update the shadow buffer. */
587 uint8_t *pbDataShw;
588 /** List of I/O contexts updating the shadow buffer while there is a write
589 * in progress. */
590 RTLISTNODE ListIoCtxShwWrites;
591 /** Data stored - variable size. */
592 uint8_t abData[1];
593} VDMETAXFER;
594
595/**
596 * The transfer direction for the metadata.
597 */
598#define VDMETAXFER_TXDIR_MASK 0x3
599#define VDMETAXFER_TXDIR_NONE 0x0
600#define VDMETAXFER_TXDIR_WRITE 0x1
601#define VDMETAXFER_TXDIR_READ 0x2
602#define VDMETAXFER_TXDIR_FLUSH 0x3
603#define VDMETAXFER_TXDIR_GET(flags) ((flags) & VDMETAXFER_TXDIR_MASK)
604#define VDMETAXFER_TXDIR_SET(flags, dir) ((flags) = (flags & ~VDMETAXFER_TXDIR_MASK) | (dir))
605
606/**
607 * Plugin structure.
608 */
609typedef struct VDPLUGIN
610{
611 /** Pointer to the next plugin structure. */
612 RTLISTNODE NodePlugin;
613 /** Handle of loaded plugin library. */
614 RTLDRMOD hPlugin;
615 /** Filename of the loaded plugin. */
616 char *pszFilename;
617} VDPLUGIN;
618/** Pointer to a plugin structure. */
619typedef VDPLUGIN *PVDPLUGIN;
620
621/** Head of loaded plugin list. */
622static RTLISTANCHOR g_ListPluginsLoaded;
623
624/** Number of image backends supported. */
625static unsigned g_cBackends = 0;
626/** Array of pointers to the image backends. */
627static PCVDIMAGEBACKEND *g_apBackends = NULL;
628/** Array of handles to the corresponding plugin. */
629static RTLDRMOD *g_ahBackendPlugins = NULL;
630/** Builtin image backends. */
631static PCVDIMAGEBACKEND aStaticBackends[] =
632{
633 &g_VmdkBackend,
634 &g_VDIBackend,
635 &g_VhdBackend,
636 &g_ParallelsBackend,
637 &g_DmgBackend,
638 &g_QedBackend,
639 &g_QCowBackend,
640 &g_VhdxBackend,
641 &g_RawBackend,
642 &g_ISCSIBackend
643};
644
645/** Number of supported cache backends. */
646static unsigned g_cCacheBackends = 0;
647/** Array of pointers to the cache backends. */
648static PCVDCACHEBACKEND *g_apCacheBackends = NULL;
649/** Array of handles to the corresponding plugin. */
650static RTLDRMOD *g_ahCacheBackendPlugins = NULL;
651/** Builtin cache backends. */
652static PCVDCACHEBACKEND aStaticCacheBackends[] =
653{
654 &g_VciCacheBackend
655};
656
657/** Number of supported filter backends. */
658static unsigned g_cFilterBackends = 0;
659/** Array of pointers to the filters backends. */
660static PCVDFILTERBACKEND *g_apFilterBackends = NULL;
661#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
662/** Array of handles to the corresponding plugin. */
663static PRTLDRMOD g_pahFilterBackendPlugins = NULL;
664#endif
665
666/** Forward declaration of the async discard helper. */
667static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx);
668static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx);
669static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk);
670static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc);
671static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq);
672
673/**
674 * internal: add several backends.
675 */
676static int vdAddBackends(RTLDRMOD hPlugin, PCVDIMAGEBACKEND *ppBackends, unsigned cBackends)
677{
678 PCVDIMAGEBACKEND *pTmp = (PCVDIMAGEBACKEND *)RTMemRealloc(g_apBackends,
679 (g_cBackends + cBackends) * sizeof(PCVDIMAGEBACKEND));
680 if (RT_UNLIKELY(!pTmp))
681 return VERR_NO_MEMORY;
682 g_apBackends = pTmp;
683
684 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahBackendPlugins,
685 (g_cBackends + cBackends) * sizeof(RTLDRMOD));
686 if (RT_UNLIKELY(!pTmpPlugins))
687 return VERR_NO_MEMORY;
688 g_ahBackendPlugins = pTmpPlugins;
689 memcpy(&g_apBackends[g_cBackends], ppBackends, cBackends * sizeof(PCVDIMAGEBACKEND));
690 for (unsigned i = g_cBackends; i < g_cBackends + cBackends; i++)
691 g_ahBackendPlugins[i] = hPlugin;
692 g_cBackends += cBackends;
693 return VINF_SUCCESS;
694}
695
696#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
697/**
698 * internal: add single backend.
699 */
700DECLINLINE(int) vdAddBackend(RTLDRMOD hPlugin, PCVDIMAGEBACKEND pBackend)
701{
702 return vdAddBackends(hPlugin, &pBackend, 1);
703}
704#endif
705
706/**
707 * internal: add several cache backends.
708 */
709static int vdAddCacheBackends(RTLDRMOD hPlugin, PCVDCACHEBACKEND *ppBackends, unsigned cBackends)
710{
711 PCVDCACHEBACKEND *pTmp = (PCVDCACHEBACKEND*)RTMemRealloc(g_apCacheBackends,
712 (g_cCacheBackends + cBackends) * sizeof(PCVDCACHEBACKEND));
713 if (RT_UNLIKELY(!pTmp))
714 return VERR_NO_MEMORY;
715 g_apCacheBackends = pTmp;
716
717 RTLDRMOD *pTmpPlugins = (RTLDRMOD*)RTMemRealloc(g_ahCacheBackendPlugins,
718 (g_cCacheBackends + cBackends) * sizeof(RTLDRMOD));
719 if (RT_UNLIKELY(!pTmpPlugins))
720 return VERR_NO_MEMORY;
721 g_ahCacheBackendPlugins = pTmpPlugins;
722 memcpy(&g_apCacheBackends[g_cCacheBackends], ppBackends, cBackends * sizeof(PCVDCACHEBACKEND));
723 for (unsigned i = g_cCacheBackends; i < g_cCacheBackends + cBackends; i++)
724 g_ahCacheBackendPlugins[i] = hPlugin;
725 g_cCacheBackends += cBackends;
726 return VINF_SUCCESS;
727}
728
729#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
730
731/**
732 * internal: add single cache backend.
733 */
734DECLINLINE(int) vdAddCacheBackend(RTLDRMOD hPlugin, PCVDCACHEBACKEND pBackend)
735{
736 return vdAddCacheBackends(hPlugin, &pBackend, 1);
737}
738
739
740/**
741 * Add several filter backends.
742 *
743 * @returns VBox status code.
744 * @param hPlugin Plugin handle to add.
745 * @param ppBackends Array of filter backends to add.
746 * @param cBackends Number of backends to add.
747 */
748static int vdAddFilterBackends(RTLDRMOD hPlugin, PCVDFILTERBACKEND *ppBackends, unsigned cBackends)
749{
750 PCVDFILTERBACKEND *pTmp = (PCVDFILTERBACKEND *)RTMemRealloc(g_apFilterBackends,
751 (g_cFilterBackends + cBackends) * sizeof(PCVDFILTERBACKEND));
752 if (RT_UNLIKELY(!pTmp))
753 return VERR_NO_MEMORY;
754 g_apFilterBackends = pTmp;
755
756 PRTLDRMOD pTmpPlugins = (PRTLDRMOD)RTMemRealloc(g_pahFilterBackendPlugins,
757 (g_cFilterBackends + cBackends) * sizeof(RTLDRMOD));
758 if (RT_UNLIKELY(!pTmpPlugins))
759 return VERR_NO_MEMORY;
760
761 g_pahFilterBackendPlugins = pTmpPlugins;
762 memcpy(&g_apFilterBackends[g_cFilterBackends], ppBackends, cBackends * sizeof(PCVDFILTERBACKEND));
763 for (unsigned i = g_cFilterBackends; i < g_cFilterBackends + cBackends; i++)
764 g_pahFilterBackendPlugins[i] = hPlugin;
765 g_cFilterBackends += cBackends;
766 return VINF_SUCCESS;
767}
768
769
770/**
771 * Add a single filter backend to the list of supported filters.
772 *
773 * @returns VBox status code.
774 * @param hPlugin Plugin handle to add.
775 * @param pBackend The backend to add.
776 */
777DECLINLINE(int) vdAddFilterBackend(RTLDRMOD hPlugin, PCVDFILTERBACKEND pBackend)
778{
779 return vdAddFilterBackends(hPlugin, &pBackend, 1);
780}
781
782#endif /* VBOX_HDD_NO_DYNAMIC_BACKENDS*/
783
784/**
785 * internal: issue error message.
786 */
787static int vdError(PVBOXHDD pDisk, int rc, RT_SRC_POS_DECL,
788 const char *pszFormat, ...)
789{
790 va_list va;
791 va_start(va, pszFormat);
792 if (pDisk->pInterfaceError)
793 pDisk->pInterfaceError->pfnError(pDisk->pInterfaceError->Core.pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
794 va_end(va);
795 return rc;
796}
797
798/**
799 * internal: thread synchronization, start read.
800 */
801DECLINLINE(int) vdThreadStartRead(PVBOXHDD pDisk)
802{
803 int rc = VINF_SUCCESS;
804 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
805 rc = pDisk->pInterfaceThreadSync->pfnStartRead(pDisk->pInterfaceThreadSync->Core.pvUser);
806 return rc;
807}
808
809/**
810 * internal: thread synchronization, finish read.
811 */
812DECLINLINE(int) vdThreadFinishRead(PVBOXHDD pDisk)
813{
814 int rc = VINF_SUCCESS;
815 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
816 rc = pDisk->pInterfaceThreadSync->pfnFinishRead(pDisk->pInterfaceThreadSync->Core.pvUser);
817 return rc;
818}
819
820/**
821 * internal: thread synchronization, start write.
822 */
823DECLINLINE(int) vdThreadStartWrite(PVBOXHDD pDisk)
824{
825 int rc = VINF_SUCCESS;
826 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
827 rc = pDisk->pInterfaceThreadSync->pfnStartWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
828 return rc;
829}
830
831/**
832 * internal: thread synchronization, finish write.
833 */
834DECLINLINE(int) vdThreadFinishWrite(PVBOXHDD pDisk)
835{
836 int rc = VINF_SUCCESS;
837 if (RT_UNLIKELY(pDisk->pInterfaceThreadSync))
838 rc = pDisk->pInterfaceThreadSync->pfnFinishWrite(pDisk->pInterfaceThreadSync->Core.pvUser);
839 return rc;
840}
841
842/**
843 * internal: find image format backend.
844 */
845static int vdFindBackend(const char *pszBackend, PCVDIMAGEBACKEND *ppBackend)
846{
847 int rc = VINF_SUCCESS;
848 PCVDIMAGEBACKEND pBackend = NULL;
849
850 if (!g_apBackends)
851 VDInit();
852
853 for (unsigned i = 0; i < g_cBackends; i++)
854 {
855 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
856 {
857 pBackend = g_apBackends[i];
858 break;
859 }
860 }
861 *ppBackend = pBackend;
862 return rc;
863}
864
865/**
866 * internal: find cache format backend.
867 */
868static int vdFindCacheBackend(const char *pszBackend, PCVDCACHEBACKEND *ppBackend)
869{
870 int rc = VINF_SUCCESS;
871 PCVDCACHEBACKEND pBackend = NULL;
872
873 if (!g_apCacheBackends)
874 VDInit();
875
876 for (unsigned i = 0; i < g_cCacheBackends; i++)
877 {
878 if (!RTStrICmp(pszBackend, g_apCacheBackends[i]->pszBackendName))
879 {
880 pBackend = g_apCacheBackends[i];
881 break;
882 }
883 }
884 *ppBackend = pBackend;
885 return rc;
886}
887
888/**
889 * internal: find filter backend.
890 */
891static int vdFindFilterBackend(const char *pszFilter, PCVDFILTERBACKEND *ppBackend)
892{
893 int rc = VINF_SUCCESS;
894 PCVDFILTERBACKEND pBackend = NULL;
895
896 for (unsigned i = 0; i < g_cFilterBackends; i++)
897 {
898 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
899 {
900 pBackend = g_apFilterBackends[i];
901 break;
902 }
903 }
904 *ppBackend = pBackend;
905 return rc;
906}
907
908
909/**
910 * internal: add image structure to the end of images list.
911 */
912static void vdAddImageToList(PVBOXHDD pDisk, PVDIMAGE pImage)
913{
914 pImage->pPrev = NULL;
915 pImage->pNext = NULL;
916
917 if (pDisk->pBase)
918 {
919 Assert(pDisk->cImages > 0);
920 pImage->pPrev = pDisk->pLast;
921 pDisk->pLast->pNext = pImage;
922 pDisk->pLast = pImage;
923 }
924 else
925 {
926 Assert(pDisk->cImages == 0);
927 pDisk->pBase = pImage;
928 pDisk->pLast = pImage;
929 }
930
931 pDisk->cImages++;
932}
933
934/**
935 * internal: remove image structure from the images list.
936 */
937static void vdRemoveImageFromList(PVBOXHDD pDisk, PVDIMAGE pImage)
938{
939 Assert(pDisk->cImages > 0);
940
941 if (pImage->pPrev)
942 pImage->pPrev->pNext = pImage->pNext;
943 else
944 pDisk->pBase = pImage->pNext;
945
946 if (pImage->pNext)
947 pImage->pNext->pPrev = pImage->pPrev;
948 else
949 pDisk->pLast = pImage->pPrev;
950
951 pImage->pPrev = NULL;
952 pImage->pNext = NULL;
953
954 pDisk->cImages--;
955}
956
957/**
958 * Release a referene to the filter decrementing the counter and destroying the filter
959 * when the counter reaches zero.
960 *
961 * @returns The new reference count.
962 * @param pFilter The filter to release.
963 */
964static uint32_t vdFilterRelease(PVDFILTER pFilter)
965{
966 uint32_t cRefs = ASMAtomicDecU32(&pFilter->cRefs);
967 if (!cRefs)
968 {
969 pFilter->pBackend->pfnDestroy(pFilter->pvBackendData);
970 RTMemFree(pFilter);
971 }
972
973 return cRefs;
974}
975
976/**
977 * Increments the reference counter of the given filter.
978 *
979 * @return The new reference count.
980 * @param pFilter The filter.
981 */
982static uint32_t vdFilterRetain(PVDFILTER pFilter)
983{
984 return ASMAtomicIncU32(&pFilter->cRefs);
985}
986
987/**
988 * internal: find image by index into the images list.
989 */
990static PVDIMAGE vdGetImageByNumber(PVBOXHDD pDisk, unsigned nImage)
991{
992 PVDIMAGE pImage = pDisk->pBase;
993 if (nImage == VD_LAST_IMAGE)
994 return pDisk->pLast;
995 while (pImage && nImage)
996 {
997 pImage = pImage->pNext;
998 nImage--;
999 }
1000 return pImage;
1001}
1002
1003/**
1004 * Applies the filter chain to the given write request.
1005 *
1006 * @returns VBox status code.
1007 * @param pDisk The HDD container.
1008 * @param uOffset The start offset of the write.
1009 * @param cbWrite Number of bytes to write.
1010 * @param pIoCtx The I/O context associated with the request.
1011 */
1012static int vdFilterChainApplyWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
1013 PVDIOCTX pIoCtx)
1014{
1015 int rc = VINF_SUCCESS;
1016
1017 VD_IS_LOCKED(pDisk);
1018
1019 PVDFILTER pFilter;
1020 RTListForEach(&pDisk->ListFilterChainWrite, pFilter, VDFILTER, ListNodeChainWrite)
1021 {
1022 rc = pFilter->pBackend->pfnFilterWrite(pFilter->pvBackendData, uOffset, cbWrite, pIoCtx);
1023 if (RT_FAILURE(rc))
1024 break;
1025 /* Reset S/G buffer for the next filter. */
1026 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1027 }
1028
1029 return rc;
1030}
1031
1032/**
1033 * Applies the filter chain to the given read request.
1034 *
1035 * @returns VBox status code.
1036 * @param pDisk The HDD container.
1037 * @param uOffset The start offset of the read.
1038 * @param cbRead Number of bytes read.
1039 * @param pIoCtx The I/O context associated with the request.
1040 */
1041static int vdFilterChainApplyRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
1042 PVDIOCTX pIoCtx)
1043{
1044 int rc = VINF_SUCCESS;
1045
1046 VD_IS_LOCKED(pDisk);
1047
1048 /* Reset buffer before starting. */
1049 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1050
1051 PVDFILTER pFilter;
1052 RTListForEach(&pDisk->ListFilterChainRead, pFilter, VDFILTER, ListNodeChainRead)
1053 {
1054 rc = pFilter->pBackend->pfnFilterRead(pFilter->pvBackendData, uOffset, cbRead, pIoCtx);
1055 if (RT_FAILURE(rc))
1056 break;
1057 /* Reset S/G buffer for the next filter. */
1058 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
1059 }
1060
1061 return rc;
1062}
1063
/**
 * Completes a root I/O context: for a successful read the data is first run
 * through the read filter chain, then the root completion callback is invoked
 * with the final status code.
 */
DECLINLINE(void) vdIoCtxRootComplete(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    /* Apply the read filters over the originally requested range before the
     * caller gets to see the data; a filter failure overrides the status. */
    if (   RT_SUCCESS(pIoCtx->rcReq)
        && pIoCtx->enmTxDir == VDIOCTXTXDIR_READ)
        pIoCtx->rcReq = vdFilterChainApplyRead(pDisk, pIoCtx->Req.Io.uOffsetXferOrig,
                                               pIoCtx->Req.Io.cbXferOrig, pIoCtx);

    pIoCtx->Type.Root.pfnComplete(pIoCtx->Type.Root.pvUser1,
                                  pIoCtx->Type.Root.pvUser2,
                                  pIoCtx->rcReq);
}
1075
1076/**
1077 * Initialize the structure members of a given I/O context.
1078 */
1079DECLINLINE(void) vdIoCtxInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1080 uint64_t uOffset, size_t cbTransfer, PVDIMAGE pImageStart,
1081 PCRTSGBUF pcSgBuf, void *pvAllocation,
1082 PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
1083{
1084 pIoCtx->pDisk = pDisk;
1085 pIoCtx->enmTxDir = enmTxDir;
1086 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTransfer; Assert((uint32_t)cbTransfer == cbTransfer);
1087 pIoCtx->Req.Io.uOffset = uOffset;
1088 pIoCtx->Req.Io.cbTransfer = cbTransfer;
1089 pIoCtx->Req.Io.pImageStart = pImageStart;
1090 pIoCtx->Req.Io.pImageCur = pImageStart;
1091 pIoCtx->Req.Io.cbBufClear = 0;
1092 pIoCtx->Req.Io.pImageParentOverride = NULL;
1093 pIoCtx->Req.Io.uOffsetXferOrig = uOffset;
1094 pIoCtx->Req.Io.cbXferOrig = cbTransfer;
1095 pIoCtx->cDataTransfersPending = 0;
1096 pIoCtx->cMetaTransfersPending = 0;
1097 pIoCtx->fComplete = false;
1098 pIoCtx->fFlags = fFlags;
1099 pIoCtx->pvAllocation = pvAllocation;
1100 pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
1101 pIoCtx->pfnIoCtxTransferNext = NULL;
1102 pIoCtx->rcReq = VINF_SUCCESS;
1103 pIoCtx->pIoCtxParent = NULL;
1104
1105 /* There is no S/G list for a flush request. */
1106 if ( enmTxDir != VDIOCTXTXDIR_FLUSH
1107 && enmTxDir != VDIOCTXTXDIR_DISCARD)
1108 RTSgBufClone(&pIoCtx->Req.Io.SgBuf, pcSgBuf);
1109 else
1110 memset(&pIoCtx->Req.Io.SgBuf, 0, sizeof(RTSGBUF));
1111}
1112
1113/**
1114 * Internal: Tries to read the desired range from the given cache.
1115 *
1116 * @returns VBox status code.
1117 * @retval VERR_VD_BLOCK_FREE if the block is not in the cache.
1118 * pcbRead will be set to the number of bytes not in the cache.
1119 * Everything thereafter might be in the cache.
1120 * @param pCache The cache to read from.
1121 * @param uOffset Offset of the virtual disk to read.
1122 * @param cbRead How much to read.
1123 * @param pIoCtx The I/O context to read into.
1124 * @param pcbRead Where to store the number of bytes actually read.
1125 * On success this indicates the number of bytes read from the cache.
1126 * If VERR_VD_BLOCK_FREE is returned this gives the number of bytes
1127 * which are not in the cache.
1128 * In both cases everything beyond this value
1129 * might or might not be in the cache.
1130 */
1131static int vdCacheReadHelper(PVDCACHE pCache, uint64_t uOffset,
1132 size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbRead)
1133{
1134 int rc = VINF_SUCCESS;
1135
1136 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbRead=%zu pcbRead=%#p\n",
1137 pCache, uOffset, pIoCtx, cbRead, pcbRead));
1138
1139 AssertPtr(pCache);
1140 AssertPtr(pcbRead);
1141
1142 rc = pCache->Backend->pfnRead(pCache->pBackendData, uOffset, cbRead,
1143 pIoCtx, pcbRead);
1144
1145 LogFlowFunc(("returns rc=%Rrc pcbRead=%zu\n", rc, *pcbRead));
1146 return rc;
1147}
1148
1149/**
1150 * Internal: Writes data for the given block into the cache.
1151 *
1152 * @returns VBox status code.
1153 * @param pCache The cache to write to.
1154 * @param uOffset Offset of the virtual disk to write to the cache.
1155 * @param cbWrite How much to write.
1156 * @param pIoCtx The I/O context to write from.
1157 * @param pcbWritten How much data could be written, optional.
1158 */
1159static int vdCacheWriteHelper(PVDCACHE pCache, uint64_t uOffset, size_t cbWrite,
1160 PVDIOCTX pIoCtx, size_t *pcbWritten)
1161{
1162 int rc = VINF_SUCCESS;
1163
1164 LogFlowFunc(("pCache=%#p uOffset=%llu pIoCtx=%p cbWrite=%zu pcbWritten=%#p\n",
1165 pCache, uOffset, pIoCtx, cbWrite, pcbWritten));
1166
1167 AssertPtr(pCache);
1168 AssertPtr(pIoCtx);
1169 Assert(cbWrite > 0);
1170
1171 if (pcbWritten)
1172 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1173 pIoCtx, pcbWritten);
1174 else
1175 {
1176 size_t cbWritten = 0;
1177
1178 do
1179 {
1180 rc = pCache->Backend->pfnWrite(pCache->pBackendData, uOffset, cbWrite,
1181 pIoCtx, &cbWritten);
1182 uOffset += cbWritten;
1183 cbWrite -= cbWritten;
1184 } while ( cbWrite
1185 && ( RT_SUCCESS(rc)
1186 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));
1187 }
1188
1189 LogFlowFunc(("returns rc=%Rrc pcbWritten=%zu\n",
1190 rc, pcbWritten ? *pcbWritten : cbWrite));
1191 return rc;
1192}
1193
1194/**
1195 * Creates a new empty discard state.
1196 *
1197 * @returns Pointer to the new discard state or NULL if out of memory.
1198 */
1199static PVDDISCARDSTATE vdDiscardStateCreate(void)
1200{
1201 PVDDISCARDSTATE pDiscard = (PVDDISCARDSTATE)RTMemAllocZ(sizeof(VDDISCARDSTATE));
1202
1203 if (pDiscard)
1204 {
1205 RTListInit(&pDiscard->ListLru);
1206 pDiscard->pTreeBlocks = (PAVLRU64TREE)RTMemAllocZ(sizeof(AVLRU64TREE));
1207 if (!pDiscard->pTreeBlocks)
1208 {
1209 RTMemFree(pDiscard);
1210 pDiscard = NULL;
1211 }
1212 }
1213
1214 return pDiscard;
1215}
1216
1217/**
1218 * Removes the least recently used blocks from the waiting list until
1219 * the new value is reached.
1220 *
1221 * @returns VBox status code.
1222 * @param pDisk VD disk container.
1223 * @param pDiscard The discard state.
1224 * @param cbDiscardingNew How many bytes should be waiting on success.
1225 * The number of bytes waiting can be less.
1226 */
static int vdDiscardRemoveBlocks(PVBOXHDD pDisk, PVDDISCARDSTATE pDiscard, size_t cbDiscardingNew)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
                 pDisk, pDiscard, cbDiscardingNew));

    /* Evict least recently used blocks (tail of the LRU list) until the
     * amount of data waiting to be discarded drops to the requested value. */
    while (pDiscard->cbDiscarding > cbDiscardingNew)
    {
        PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);

        Assert(!RTListIsEmpty(&pDiscard->ListLru));

        /* Go over the allocation bitmap and mark all discarded sectors as unused. */
        uint64_t offStart = pBlock->Core.Key;
        uint32_t idxStart = 0;
        size_t cbLeft = pBlock->cbDiscard;
        bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
        /* One bit per 512 byte sector in the allocation bitmap. */
        uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512);

        /* Walk the bitmap in alternating runs of allocated/unallocated sectors;
         * only the unallocated runs are passed down to the backend for discard. */
        while (cbLeft > 0)
        {
            int32_t idxEnd;
            size_t cbThis = cbLeft;

            if (fAllocated)
            {
                /* Check for the first unallocated bit. */
                idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                {
                    cbThis = (idxEnd - idxStart) * 512;
                    fAllocated = false;
                }
            }
            else
            {
                /* Mark as unused and check for the first set bit. */
                idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                    cbThis = (idxEnd - idxStart) * 512;

                /* Issue a synchronous discard for this unallocated run. */
                VDIOCTX IoCtx;
                vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_DISCARD, 0, 0, NULL,
                            NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
                rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData,
                                                       &IoCtx, offStart, cbThis, NULL,
                                                       NULL, &cbThis, NULL,
                                                       VD_DISCARD_MARK_UNUSED);
                if (RT_FAILURE(rc))
                    break;

                fAllocated = true;
            }

            idxStart = idxEnd;
            offStart += cbThis;
            cbLeft -= cbThis;
        }

        if (RT_FAILURE(rc))
            break;

        /* Fully processed - drop the block from the tree, the LRU list and the accounting. */
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
        RTListNodeRemove(&pBlock->NodeLru);

        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
    }

    Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
1305
1306/**
1307 * Destroys the current discard state, writing any waiting blocks to the image.
1308 *
1309 * @returns VBox status code.
1310 * @param pDisk VD disk container.
1311 */
1312static int vdDiscardStateDestroy(PVBOXHDD pDisk)
1313{
1314 int rc = VINF_SUCCESS;
1315
1316 if (pDisk->pDiscard)
1317 {
1318 rc = vdDiscardRemoveBlocks(pDisk, pDisk->pDiscard, 0 /* Remove all blocks. */);
1319 AssertRC(rc);
1320 RTMemFree(pDisk->pDiscard->pTreeBlocks);
1321 RTMemFree(pDisk->pDiscard);
1322 pDisk->pDiscard = NULL;
1323 }
1324
1325 return rc;
1326}
1327
1328/**
1329 * Marks the given range as allocated in the image.
1330 * Required if there are discards in progress and a write to a block which can get discarded
1331 * is written to.
1332 *
1333 * @returns VBox status code.
1334 * @param pDisk VD container data.
1335 * @param uOffset First byte to mark as allocated.
1336 * @param cbRange Number of bytes to mark as allocated.
1337 */
static int vdDiscardSetRangeAllocated(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRange)
{
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    int rc = VINF_SUCCESS;

    if (pDiscard)
    {
        /* Walk the range, handling one pending-discard block (or the gap up to
         * the next one) per iteration. */
        do
        {
            size_t cbThisRange = cbRange;
            PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64RangeGet(pDiscard->pTreeBlocks, uOffset);

            if (pBlock)
            {
                int32_t idxStart, idxEnd;

                /* Offsets and sizes must be aligned to the 512 byte sector
                 * granularity of the allocation bitmap. */
                Assert(!(cbThisRange % 512));
                Assert(!((uOffset - pBlock->Core.Key) % 512));

                /* Clip to the end of this block. */
                cbThisRange = RT_MIN(cbThisRange, pBlock->Core.KeyLast - uOffset + 1);

                /* Mark the covered sectors as allocated in the block's bitmap. */
                idxStart = (uOffset - pBlock->Core.Key) / 512;
                idxEnd = idxStart + (int32_t)(cbThisRange / 512);
                ASMBitSetRange(pBlock->pbmAllocated, idxStart, idxEnd);
            }
            else
            {
                /* No block covers uOffset - skip ahead to the start of the next
                 * block above it, or consume the remaining range if there is none. */
                pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, uOffset, true);
                if (pBlock)
                    cbThisRange = RT_MIN(cbThisRange, pBlock->Core.Key - uOffset);
            }

            Assert(cbRange >= cbThisRange);

            uOffset += cbThisRange;
            cbRange -= cbThisRange;
        } while (cbRange != 0);
    }

    return rc;
}
1379
1380DECLINLINE(PVDIOCTX) vdIoCtxAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1381 uint64_t uOffset, size_t cbTransfer,
1382 PVDIMAGE pImageStart,PCRTSGBUF pcSgBuf,
1383 void *pvAllocation, PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1384 uint32_t fFlags)
1385{
1386 PVDIOCTX pIoCtx = NULL;
1387
1388 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1389 if (RT_LIKELY(pIoCtx))
1390 {
1391 vdIoCtxInit(pIoCtx, pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1392 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1393 }
1394
1395 return pIoCtx;
1396}
1397
1398DECLINLINE(PVDIOCTX) vdIoCtxRootAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1399 uint64_t uOffset, size_t cbTransfer,
1400 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1401 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1402 void *pvUser1, void *pvUser2,
1403 void *pvAllocation,
1404 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1405 uint32_t fFlags)
1406{
1407 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1408 pcSgBuf, pvAllocation, pfnIoCtxTransfer, fFlags);
1409
1410 if (RT_LIKELY(pIoCtx))
1411 {
1412 pIoCtx->pIoCtxParent = NULL;
1413 pIoCtx->Type.Root.pfnComplete = pfnComplete;
1414 pIoCtx->Type.Root.pvUser1 = pvUser1;
1415 pIoCtx->Type.Root.pvUser2 = pvUser2;
1416 }
1417
1418 LogFlow(("Allocated root I/O context %#p\n", pIoCtx));
1419 return pIoCtx;
1420}
1421
/**
 * Initializes the given I/O context as a root discard request.
 *
 * @param pIoCtx           The I/O context to initialize.
 * @param pDisk            The HDD container.
 * @param paRanges         Ranges to discard; stored by pointer, not copied.
 * @param cRanges          Number of entries in the range array.
 * @param pfnComplete      Completion callback invoked when the request finishes.
 * @param pvUser1          First opaque user argument for the callback.
 * @param pvUser2          Second opaque user argument for the callback.
 * @param pvAllocation     Optional allocation released together with the context.
 * @param pfnIoCtxTransfer Transfer function processing the context.
 * @param fFlags           Combination of VDIOCTX_FLAGS_* flags.
 */
DECLINLINE(void) vdIoCtxDiscardInit(PVDIOCTX pIoCtx, PVBOXHDD pDisk, PCRTRANGE paRanges,
                                    unsigned cRanges, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
                                    void *pvUser1, void *pvUser2, void *pvAllocation,
                                    PFNVDIOCTXTRANSFER pfnIoCtxTransfer, uint32_t fFlags)
{
    pIoCtx->pIoCtxNext = NULL;
    pIoCtx->pDisk = pDisk;
    pIoCtx->enmTxDir = VDIOCTXTXDIR_DISCARD;
    pIoCtx->cDataTransfersPending = 0;
    pIoCtx->cMetaTransfersPending = 0;
    pIoCtx->fComplete = false;
    pIoCtx->fFlags = fFlags;
    pIoCtx->pvAllocation = pvAllocation;
    pIoCtx->pfnIoCtxTransfer = pfnIoCtxTransfer;
    pIoCtx->pfnIoCtxTransferNext = NULL;
    pIoCtx->rcReq = VINF_SUCCESS;
    /* Discard specific request state; progress fields start at zero. */
    pIoCtx->Req.Discard.paRanges = paRanges;
    pIoCtx->Req.Discard.cRanges = cRanges;
    pIoCtx->Req.Discard.idxRange = 0;
    pIoCtx->Req.Discard.cbDiscardLeft = 0;
    pIoCtx->Req.Discard.offCur = 0;
    pIoCtx->Req.Discard.cbThisDiscard = 0;

    /* Discard contexts are always roots with a completion callback. */
    pIoCtx->pIoCtxParent = NULL;
    pIoCtx->Type.Root.pfnComplete = pfnComplete;
    pIoCtx->Type.Root.pvUser1 = pvUser1;
    pIoCtx->Type.Root.pvUser2 = pvUser2;
}
1450
1451DECLINLINE(PVDIOCTX) vdIoCtxDiscardAlloc(PVBOXHDD pDisk, PCRTRANGE paRanges,
1452 unsigned cRanges,
1453 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
1454 void *pvUser1, void *pvUser2,
1455 void *pvAllocation,
1456 PFNVDIOCTXTRANSFER pfnIoCtxTransfer,
1457 uint32_t fFlags)
1458{
1459 PVDIOCTX pIoCtx = NULL;
1460
1461 pIoCtx = (PVDIOCTX)RTMemCacheAlloc(pDisk->hMemCacheIoCtx);
1462 if (RT_LIKELY(pIoCtx))
1463 {
1464 vdIoCtxDiscardInit(pIoCtx, pDisk, paRanges, cRanges, pfnComplete, pvUser1,
1465 pvUser2, pvAllocation, pfnIoCtxTransfer, fFlags);
1466 }
1467
1468 LogFlow(("Allocated discard I/O context %#p\n", pIoCtx));
1469 return pIoCtx;
1470}
1471
1472DECLINLINE(PVDIOCTX) vdIoCtxChildAlloc(PVBOXHDD pDisk, VDIOCTXTXDIR enmTxDir,
1473 uint64_t uOffset, size_t cbTransfer,
1474 PVDIMAGE pImageStart, PCRTSGBUF pcSgBuf,
1475 PVDIOCTX pIoCtxParent, size_t cbTransferParent,
1476 size_t cbWriteParent, void *pvAllocation,
1477 PFNVDIOCTXTRANSFER pfnIoCtxTransfer)
1478{
1479 PVDIOCTX pIoCtx = vdIoCtxAlloc(pDisk, enmTxDir, uOffset, cbTransfer, pImageStart,
1480 pcSgBuf, pvAllocation, pfnIoCtxTransfer, pIoCtxParent->fFlags & ~VDIOCTX_FLAGS_DONT_FREE);
1481
1482 AssertPtr(pIoCtxParent);
1483 Assert(!pIoCtxParent->pIoCtxParent);
1484
1485 if (RT_LIKELY(pIoCtx))
1486 {
1487 pIoCtx->pIoCtxParent = pIoCtxParent;
1488 pIoCtx->Type.Child.uOffsetSaved = uOffset;
1489 pIoCtx->Type.Child.cbTransferLeftSaved = cbTransfer;
1490 pIoCtx->Type.Child.cbTransferParent = cbTransferParent;
1491 pIoCtx->Type.Child.cbWriteParent = cbWriteParent;
1492 }
1493
1494 LogFlow(("Allocated child I/O context %#p\n", pIoCtx));
1495 return pIoCtx;
1496}
1497
1498DECLINLINE(PVDIOTASK) vdIoTaskUserAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDIOCTX pIoCtx, uint32_t cbTransfer)
1499{
1500 PVDIOTASK pIoTask = NULL;
1501
1502 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1503 if (pIoTask)
1504 {
1505 pIoTask->pIoStorage = pIoStorage;
1506 pIoTask->pfnComplete = pfnComplete;
1507 pIoTask->pvUser = pvUser;
1508 pIoTask->fMeta = false;
1509 pIoTask->Type.User.cbTransfer = cbTransfer;
1510 pIoTask->Type.User.pIoCtx = pIoCtx;
1511 }
1512
1513 return pIoTask;
1514}
1515
1516DECLINLINE(PVDIOTASK) vdIoTaskMetaAlloc(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser, PVDMETAXFER pMetaXfer)
1517{
1518 PVDIOTASK pIoTask = NULL;
1519
1520 pIoTask = (PVDIOTASK)RTMemCacheAlloc(pIoStorage->pVDIo->pDisk->hMemCacheIoTask);
1521 if (pIoTask)
1522 {
1523 pIoTask->pIoStorage = pIoStorage;
1524 pIoTask->pfnComplete = pfnComplete;
1525 pIoTask->pvUser = pvUser;
1526 pIoTask->fMeta = true;
1527 pIoTask->Type.Meta.pMetaXfer = pMetaXfer;
1528 }
1529
1530 return pIoTask;
1531}
1532
1533DECLINLINE(void) vdIoCtxFree(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
1534{
1535 Log(("Freeing I/O context %#p\n", pIoCtx));
1536
1537 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_FREE))
1538 {
1539 if (pIoCtx->pvAllocation)
1540 RTMemFree(pIoCtx->pvAllocation);
1541#ifdef DEBUG
1542 memset(&pIoCtx->pDisk, 0xff, sizeof(void *));
1543#endif
1544 RTMemCacheFree(pDisk->hMemCacheIoCtx, pIoCtx);
1545 }
1546}
1547
/**
 * Returns the given I/O task to the disk's task memory cache.
 */
DECLINLINE(void) vdIoTaskFree(PVBOXHDD pDisk, PVDIOTASK pIoTask)
{
#ifdef DEBUG
    /* Poison the task to catch use-after-free in debug builds. */
    memset(pIoTask, 0xff, sizeof(VDIOTASK));
#endif
    RTMemCacheFree(pDisk->hMemCacheIoTask, pIoTask);
}
1555
/**
 * Resets a child I/O context to the offset and transfer size saved at
 * allocation time so the transfer can be restarted from the beginning.
 */
DECLINLINE(void) vdIoCtxChildReset(PVDIOCTX pIoCtx)
{
    AssertPtr(pIoCtx->pIoCtxParent);

    RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    pIoCtx->Req.Io.uOffset = pIoCtx->Type.Child.uOffsetSaved;
    /* cbTransferLeft is 32 bits; the assertion guards against truncation. */
    pIoCtx->Req.Io.cbTransferLeft = (uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved;
    Assert((uint32_t)pIoCtx->Type.Child.cbTransferLeftSaved == pIoCtx->Type.Child.cbTransferLeftSaved);
}
1565
1566DECLINLINE(PVDMETAXFER) vdMetaXferAlloc(PVDIOSTORAGE pIoStorage, uint64_t uOffset, size_t cb)
1567{
1568 PVDMETAXFER pMetaXfer = (PVDMETAXFER)RTMemAlloc(RT_OFFSETOF(VDMETAXFER, abData[cb]));
1569
1570 if (RT_LIKELY(pMetaXfer))
1571 {
1572 pMetaXfer->Core.Key = uOffset;
1573 pMetaXfer->Core.KeyLast = uOffset + cb - 1;
1574 pMetaXfer->fFlags = VDMETAXFER_TXDIR_NONE;
1575 pMetaXfer->cbMeta = cb;
1576 pMetaXfer->pIoStorage = pIoStorage;
1577 pMetaXfer->cRefs = 0;
1578 pMetaXfer->pbDataShw = NULL;
1579 RTListInit(&pMetaXfer->ListIoCtxWaiting);
1580 RTListInit(&pMetaXfer->ListIoCtxShwWrites);
1581 }
1582 return pMetaXfer;
1583}
1584
/**
 * Pushes the given I/O context onto the lock-free singly linked list headed
 * at *ppList (classic compare-and-swap push, safe against concurrent pushers).
 */
DECLINLINE(void) vdIoCtxAddToWaitingList(volatile PVDIOCTX *ppList, PVDIOCTX pIoCtx)
{
    /* Put it on the waiting list. */
    PVDIOCTX pNext = ASMAtomicUoReadPtrT(ppList, PVDIOCTX);
    PVDIOCTX pHeadOld;
    pIoCtx->pIoCtxNext = pNext;
    /* Retry with the head observed by the failed exchange until the CAS succeeds. */
    while (!ASMAtomicCmpXchgExPtr(ppList, pIoCtx, pNext, &pHeadOld))
    {
        pNext = pHeadOld;
        Assert(pNext != pIoCtx); /* Would create a cycle - the context must not be queued twice. */
        pIoCtx->pIoCtxNext = pNext;
        ASMNopPause();
    }
}
1599
/**
 * Marks the given root I/O context as blocked and queues it on the disk's
 * blocked list for later processing by vdDiskProcessBlockedIoCtx().
 */
DECLINLINE(void) vdIoCtxDefer(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    LogFlowFunc(("Deferring I/O context pIoCtx=%#p\n", pIoCtx));

    /* Only root contexts which are not already blocked may be deferred. */
    Assert(!pIoCtx->pIoCtxParent && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED));
    pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxBlockedHead, pIoCtx);
}
1608
/** Copies up to cbData bytes from the source context's S/G buffer into the
 *  destination context's S/G buffer, returning the number of bytes copied. */
static size_t vdIoCtxCopy(PVDIOCTX pIoCtxDst, PVDIOCTX pIoCtxSrc, size_t cbData)
{
    return RTSgBufCopy(&pIoCtxDst->Req.Io.SgBuf, &pIoCtxSrc->Req.Io.SgBuf, cbData);
}
1613
#if 0 /* unused */
/** Compares cbData bytes of the two contexts' S/G buffers (currently disabled/unused). */
static int vdIoCtxCmp(PVDIOCTX pIoCtx1, PVDIOCTX pIoCtx2, size_t cbData)
{
    return RTSgBufCmp(&pIoCtx1->Req.Io.SgBuf, &pIoCtx2->Req.Io.SgBuf, cbData);
}
#endif
1620
/** Copies up to cbData bytes from the flat buffer pbData INTO the context's
 *  S/G buffer, returning the number of bytes copied. */
static size_t vdIoCtxCopyTo(PVDIOCTX pIoCtx, const uint8_t *pbData, size_t cbData)
{
    return RTSgBufCopyFromBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
}
1625
/** Copies up to cbData bytes FROM the context's S/G buffer into the flat
 *  buffer pbData, returning the number of bytes copied. */
static size_t vdIoCtxCopyFrom(PVDIOCTX pIoCtx, uint8_t *pbData, size_t cbData)
{
    return RTSgBufCopyToBuf(&pIoCtx->Req.Io.SgBuf, pbData, cbData);
}
1630
/** Fills up to cbData bytes of the context's S/G buffer with the byte ch,
 *  returning the number of bytes set. */
static size_t vdIoCtxSet(PVDIOCTX pIoCtx, uint8_t ch, size_t cbData)
{
    return RTSgBufSet(&pIoCtx->Req.Io.SgBuf, ch, cbData);
}
1635
1636/**
1637 * Returns whether the given I/O context has completed.
1638 *
1639 * @returns Flag whether the I/O context is complete.
1640 * @param pIoCtx The I/O context to check.
1641 */
1642DECLINLINE(bool) vdIoCtxIsComplete(PVDIOCTX pIoCtx)
1643{
1644 if ( !pIoCtx->cMetaTransfersPending
1645 && !pIoCtx->cDataTransfersPending
1646 && !pIoCtx->pfnIoCtxTransfer)
1647 return true;
1648
1649 /*
1650 * We complete the I/O context in case of an error
1651 * if there is no I/O task pending.
1652 */
1653 if ( RT_FAILURE(pIoCtx->rcReq)
1654 && !pIoCtx->cMetaTransfersPending
1655 && !pIoCtx->cDataTransfersPending)
1656 return true;
1657
1658 return false;
1659}
1660
1661/**
1662 * Returns whether the given I/O context is blocked due to a metadata transfer
1663 * or because the backend blocked it.
1664 *
1665 * @returns Flag whether the I/O context is blocked.
1666 * @param pIoCtx The I/O context to check.
1667 */
1668DECLINLINE(bool) vdIoCtxIsBlocked(PVDIOCTX pIoCtx)
1669{
1670 /* Don't change anything if there is a metadata transfer pending or we are blocked. */
1671 if ( pIoCtx->cMetaTransfersPending
1672 || (pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
1673 return true;
1674
1675 return false;
1676}
1677
1678/**
1679 * Process the I/O context, core method which assumes that the I/O context
1680 * acquired the lock.
1681 *
1682 * @returns VBox status code.
1683 * @param pIoCtx I/O context to process.
1684 */
static int vdIoCtxProcessLocked(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pIoCtx->pDisk);

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    if (!vdIoCtxIsComplete(pIoCtx))
    {
        if (!vdIoCtxIsBlocked(pIoCtx))
        {
            if (pIoCtx->pfnIoCtxTransfer)
            {
                /* Call the transfer function advancing to the next while there is no error. */
                while (   pIoCtx->pfnIoCtxTransfer
                       && !pIoCtx->cMetaTransfersPending
                       && RT_SUCCESS(rc))
                {
                    LogFlowFunc(("calling transfer function %#p\n", pIoCtx->pfnIoCtxTransfer));
                    rc = pIoCtx->pfnIoCtxTransfer(pIoCtx);

                    /* Advance to the next part of the transfer if the current one succeeded. */
                    if (RT_SUCCESS(rc))
                    {
                        pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
                        pIoCtx->pfnIoCtxTransferNext = NULL;
                    }
                }
            }

            /* Map the processing outcome to a status code:
             *   - everything done and nothing pending -> FINISHED
             *   - success or a retryable condition    -> IN_PROGRESS
             *   - real error -> record it in rcReq; FINISHED only once all
             *     outstanding data/meta tasks have drained. */
            if (   RT_SUCCESS(rc)
                && !pIoCtx->cMetaTransfersPending
                && !pIoCtx->cDataTransfersPending
                && !(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
                rc = VINF_VD_ASYNC_IO_FINISHED;
            else if (   RT_SUCCESS(rc)
                     || rc == VERR_VD_NOT_ENOUGH_METADATA
                     || rc == VERR_VD_IOCTX_HALT)
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
            else if (   RT_FAILURE(rc)
                     && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
            {
                /* Record the first error only (rcReq must still be VINF_SUCCESS). */
                ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rc, VINF_SUCCESS);

                /*
                 * The I/O context completed if we have an error and there is no data
                 * or meta data transfer pending.
                 */
                if (   !pIoCtx->cMetaTransfersPending
                    && !pIoCtx->cDataTransfersPending)
                    rc = VINF_VD_ASYNC_IO_FINISHED;
                else
                    rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
            }
        }
        else
            rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }
    else
        rc = VINF_VD_ASYNC_IO_FINISHED;

    LogFlowFunc(("pIoCtx=%#p rc=%Rrc cDataTransfersPending=%u cMetaTransfersPending=%u fComplete=%RTbool\n",
                 pIoCtx, rc, pIoCtx->cDataTransfersPending, pIoCtx->cMetaTransfersPending,
                 pIoCtx->fComplete));

    return rc;
}
1753
1754/**
1755 * Processes the list of waiting I/O contexts.
1756 *
1757 * @returns VBox status code, only valid if pIoCtxRc is not NULL, treat as void
1758 * function otherwise.
1759 * @param pDisk The disk structure.
1760 * @param pIoCtxRc An I/O context handle which waits on the list. When processed
1761 * The status code is returned. NULL if there is no I/O context
1762 * to return the status code for.
1763 */
static int vdDiskProcessWaitingIoCtx(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
{
    int rc = VERR_VD_ASYNC_IO_IN_PROGRESS;

    LogFlowFunc(("pDisk=%#p pIoCtxRc=%#p\n", pDisk, pIoCtxRc));

    VD_IS_LOCKED(pDisk);

    /* Get the waiting list and process it in FIFO order. */
    PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHead, NULL, PVDIOCTX);

    /* Reverse it. The lock-free push builds the list LIFO, so reversing
     * restores submission order. */
    PVDIOCTX pCur = pIoCtxHead;
    pIoCtxHead = NULL;
    while (pCur)
    {
        PVDIOCTX pInsert = pCur;
        pCur = pCur->pIoCtxNext;
        pInsert->pIoCtxNext = pIoCtxHead;
        pIoCtxHead = pInsert;
    }

    /* Process now. */
    pCur = pIoCtxHead;
    while (pCur)
    {
        int rcTmp;
        PVDIOCTX pTmp = pCur;

        pCur = pCur->pIoCtxNext;
        pTmp->pIoCtxNext = NULL;

        /*
         * Need to clear the sync flag here if there is a new I/O context
         * with it set and the context is not given in pIoCtxRc.
         * This happens most likely on a different thread and that one shouldn't
         * process the context synchronously.
         *
         * The thread who issued the context will wait on the event semaphore
         * anyway which is signalled when the completion handler is called.
         */
        if (   pTmp->fFlags & VDIOCTX_FLAGS_SYNC
            && pTmp != pIoCtxRc)
            pTmp->fFlags &= ~VDIOCTX_FLAGS_SYNC;

        rcTmp = vdIoCtxProcessLocked(pTmp);
        if (pTmp == pIoCtxRc)
        {
            /* Finished reads pass through the read filter chain before the
             * status is handed back to the caller. */
            if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                && RT_SUCCESS(pTmp->rcReq)
                && pTmp->enmTxDir == VDIOCTXTXDIR_READ)
            {
                int rc2 = vdFilterChainApplyRead(pDisk, pTmp->Req.Io.uOffsetXferOrig,
                                                 pTmp->Req.Io.cbXferOrig, pTmp);
                if (RT_FAILURE(rc2))
                    rcTmp = rc2;
            }

            /* The given I/O context was processed, pass the return code to the caller. */
            if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                && (pTmp->fFlags & VDIOCTX_FLAGS_SYNC))
                rc = pTmp->rcReq;
            else
                rc = rcTmp;
        }
        else if (   rcTmp == VINF_VD_ASYNC_IO_FINISHED
                 && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
        {
            /* Some other context finished - complete and free it here. The CAS
             * on fComplete guarantees the completion runs exactly once. */
            LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
            vdThreadFinishWrite(pDisk);
            vdIoCtxRootComplete(pDisk, pTmp);
            vdIoCtxFree(pDisk, pTmp);
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
1842
1843/**
1844 * Processes the list of blocked I/O contexts.
1845 *
1846 * @returns nothing.
1847 * @param pDisk The disk structure.
1848 */
static void vdDiskProcessBlockedIoCtx(PVBOXHDD pDisk)
{
    LogFlowFunc(("pDisk=%#p\n", pDisk));

    VD_IS_LOCKED(pDisk);

    /* Get the waiting list and process it in FIFO order. */
    PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxBlockedHead, NULL, PVDIOCTX);

    /* Reverse it. The lock-free push builds the list LIFO, so reversing
     * restores submission order. */
    PVDIOCTX pCur = pIoCtxHead;
    pIoCtxHead = NULL;
    while (pCur)
    {
        PVDIOCTX pInsert = pCur;
        pCur = pCur->pIoCtxNext;
        pInsert->pIoCtxNext = pIoCtxHead;
        pIoCtxHead = pInsert;
    }

    /* Process now. */
    pCur = pIoCtxHead;
    while (pCur)
    {
        int rc;
        PVDIOCTX pTmp = pCur;

        pCur = pCur->pIoCtxNext;
        pTmp->pIoCtxNext = NULL;

        /* Only blocked root contexts may sit on this list; unblock before processing. */
        Assert(!pTmp->pIoCtxParent);
        Assert(pTmp->fFlags & VDIOCTX_FLAGS_BLOCKED);
        pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;

        rc = vdIoCtxProcessLocked(pTmp);
        if (   rc == VINF_VD_ASYNC_IO_FINISHED
            && ASMAtomicCmpXchgBool(&pTmp->fComplete, true, false))
        {
            /* The CAS on fComplete guarantees the completion runs exactly once. */
            LogFlowFunc(("Waiting I/O context completed pTmp=%#p\n", pTmp));
            vdThreadFinishWrite(pDisk);
            vdIoCtxRootComplete(pDisk, pTmp);
            vdIoCtxFree(pDisk, pTmp);
        }
    }

    LogFlowFunc(("returns\n"));
}
1896
1897/**
1898 * Processes the I/O context trying to lock the criticial section.
1899 * The context is deferred if the critical section is busy.
1900 *
1901 * @returns VBox status code.
1902 * @param pIoCtx The I/O context to process.
1903 */
static int vdIoCtxProcessTryLockDefer(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    Log(("Defer pIoCtx=%#p\n", pIoCtx));

    /* Put it on the waiting list first. */
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxHead, pIoCtx);

    /* Try to grab the disk lock; whoever holds it drains the waiting list on unlock. */
    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Leave it again, the context will be processed just before leaving the lock. */
        LogFlowFunc(("Successfully acquired the lock\n"));
        rc = vdDiskUnlock(pDisk, pIoCtx);
    }
    else
    {
        /* Another thread holds the lock and will process the queued context. */
        LogFlowFunc(("Lock is held\n"));
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }

    return rc;
}
1928
1929/**
1930 * Process the I/O context in a synchronous manner, waiting
1931 * for it to complete.
1932 *
1933 * @returns VBox status code of the completed request.
1934 * @param pIoCtx The sync I/O context.
1935 * @param hEventComplete Event sempahore to wait on for completion.
1936 */
static int vdIoCtxProcessSync(PVDIOCTX pIoCtx, RTSEMEVENT hEventComplete)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    LogFlowFunc(("pIoCtx=%p\n", pIoCtx));

    /* NOTE(review): this mask test passes if EITHER flag is set; if the intent
     * is to require both SYNC and DONT_FREE, the result should be compared
     * against the full mask - confirm before changing. */
    AssertMsg(pIoCtx->fFlags & (VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE),
              ("I/O context is not marked as synchronous\n"));

    rc = vdIoCtxProcessTryLockDefer(pIoCtx);
    if (rc == VINF_VD_ASYNC_IO_FINISHED)
        rc = VINF_SUCCESS;

    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    {
        /* Another thread is processing the context - block until it signals completion. */
        rc = RTSemEventWait(hEventComplete, RT_INDEFINITE_WAIT);
        AssertRC(rc);
    }

    rc = pIoCtx->rcReq;
    /* No-op if VDIOCTX_FLAGS_DONT_FREE is set (e.g. stack allocated contexts). */
    vdIoCtxFree(pDisk, pIoCtx);

    return rc;
}
1962
/** Returns whether the given I/O context is the current owner of the disk's
 *  I/O context lock. */
DECLINLINE(bool) vdIoCtxIsDiskLockOwner(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    return pDisk->pIoCtxLockOwner == pIoCtx;
}
1967
/**
 * Tries to make the given I/O context the owner of the disk's I/O context
 * lock; on contention the context is deferred onto the blocked list.
 *
 * @returns VINF_SUCCESS if the lock was acquired,
 *          VERR_VD_ASYNC_IO_IN_PROGRESS if the context was deferred.
 */
static int vdIoCtxLockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    LogFlowFunc(("pDisk=%#p pIoCtx=%#p\n", pDisk, pIoCtx));

    /* Atomically claim ownership; fails if any context already owns the lock. */
    if (!ASMAtomicCmpXchgPtr(&pDisk->pIoCtxLockOwner, pIoCtx, NIL_VDIOCTX))
    {
        Assert(pDisk->pIoCtxLockOwner != pIoCtx); /* No nesting allowed. */
        vdIoCtxDefer(pDisk, pIoCtx);
        rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
    }

    LogFlowFunc(("returns -> %Rrc\n", rc));
    return rc;
}
1986
/**
 * Releases the disk's I/O context lock held by the given context and
 * optionally resumes the contexts that were blocked while it was held.
 */
static void vdIoCtxUnlockDisk(PVBOXHDD pDisk, PVDIOCTX pIoCtx, bool fProcessBlockedReqs)
{
    RT_NOREF1(pIoCtx);
    LogFlowFunc(("pDisk=%#p pIoCtx=%#p fProcessBlockedReqs=%RTbool\n",
                 pDisk, pIoCtx, fProcessBlockedReqs));

    VD_IS_LOCKED(pDisk);

    LogFlow(("Unlocking disk lock owner is %#p\n", pDisk->pIoCtxLockOwner));
    Assert(pDisk->pIoCtxLockOwner == pIoCtx);
    ASMAtomicXchgPtrT(&pDisk->pIoCtxLockOwner, NIL_VDIOCTX, PVDIOCTX);

    if (fProcessBlockedReqs)
    {
        /* Process any blocked writes if the current request didn't cause another grow. */
        vdDiskProcessBlockedIoCtx(pDisk);
    }

    LogFlowFunc(("returns\n"));
}
2007
2008/**
2009 * Internal: Reads a given amount of data from the image chain of the disk.
2010 **/
2011static int vdDiskReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2012 uint64_t uOffset, size_t cbRead, PVDIOCTX pIoCtx, size_t *pcbThisRead)
2013{
2014 RT_NOREF1(pDisk);
2015 int rc = VINF_SUCCESS;
2016 size_t cbThisRead = cbRead;
2017
2018 AssertPtr(pcbThisRead);
2019
2020 *pcbThisRead = 0;
2021
2022 /*
2023 * Try to read from the given image.
2024 * If the block is not allocated read from override chain if present.
2025 */
2026 rc = pImage->Backend->pfnRead(pImage->pBackendData,
2027 uOffset, cbThisRead, pIoCtx,
2028 &cbThisRead);
2029
2030 if (rc == VERR_VD_BLOCK_FREE)
2031 {
2032 for (PVDIMAGE pCurrImage = pImageParentOverride ? pImageParentOverride : pImage->pPrev;
2033 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2034 pCurrImage = pCurrImage->pPrev)
2035 {
2036 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2037 uOffset, cbThisRead, pIoCtx,
2038 &cbThisRead);
2039 }
2040 }
2041
2042 if (RT_SUCCESS(rc) || rc == VERR_VD_BLOCK_FREE)
2043 *pcbThisRead = cbThisRead;
2044
2045 return rc;
2046}
2047
2048/**
2049 * internal: read the specified amount of data in whatever blocks the backend
2050 * will give us - async version.
2051 */
2052static DECLCALLBACK(int) vdReadHelperAsync(PVDIOCTX pIoCtx)
2053{
2054 int rc;
2055 PVBOXHDD pDisk = pIoCtx->pDisk;
2056 size_t cbToRead = pIoCtx->Req.Io.cbTransfer;
2057 uint64_t uOffset = pIoCtx->Req.Io.uOffset;
2058 PVDIMAGE pCurrImage = pIoCtx->Req.Io.pImageCur;
2059 PVDIMAGE pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;
2060 unsigned cImagesRead = pIoCtx->Req.Io.cImagesRead;
2061 size_t cbThisRead;
2062
2063 /*
2064 * Check whether there is a full block write in progress which was not allocated.
2065 * Defer I/O if the range interferes but only if it does not belong to the
2066 * write doing the allocation.
2067 */
2068 if ( pDisk->pIoCtxLockOwner != NIL_VDIOCTX
2069 && uOffset >= pDisk->uOffsetStartLocked
2070 && uOffset < pDisk->uOffsetEndLocked
2071 && ( !pIoCtx->pIoCtxParent
2072 || pIoCtx->pIoCtxParent != pDisk->pIoCtxLockOwner))
2073 {
2074 Log(("Interferring read while allocating a new block => deferring read\n"));
2075 vdIoCtxDefer(pDisk, pIoCtx);
2076 return VERR_VD_ASYNC_IO_IN_PROGRESS;
2077 }
2078
2079 /* Loop until all reads started or we have a backend which needs to read metadata. */
2080 do
2081 {
2082 /* Search for image with allocated block. Do not attempt to read more
2083 * than the previous reads marked as valid. Otherwise this would return
2084 * stale data when different block sizes are used for the images. */
2085 cbThisRead = cbToRead;
2086
2087 if ( pDisk->pCache
2088 && !pImageParentOverride)
2089 {
2090 rc = vdCacheReadHelper(pDisk->pCache, uOffset, cbThisRead,
2091 pIoCtx, &cbThisRead);
2092 if (rc == VERR_VD_BLOCK_FREE)
2093 {
2094 rc = vdDiskReadHelper(pDisk, pCurrImage, NULL, uOffset, cbThisRead,
2095 pIoCtx, &cbThisRead);
2096
2097 /* If the read was successful, write the data back into the cache. */
2098 if ( RT_SUCCESS(rc)
2099 && pIoCtx->fFlags & VDIOCTX_FLAGS_READ_UPDATE_CACHE)
2100 {
2101 rc = vdCacheWriteHelper(pDisk->pCache, uOffset, cbThisRead,
2102 pIoCtx, NULL);
2103 }
2104 }
2105 }
2106 else
2107 {
2108 /*
2109 * Try to read from the given image.
2110 * If the block is not allocated read from override chain if present.
2111 */
2112 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2113 uOffset, cbThisRead, pIoCtx,
2114 &cbThisRead);
2115
2116 if ( rc == VERR_VD_BLOCK_FREE
2117 && cImagesRead != 1)
2118 {
2119 unsigned cImagesToProcess = cImagesRead;
2120
2121 pCurrImage = pImageParentOverride ? pImageParentOverride : pCurrImage->pPrev;
2122 pIoCtx->Req.Io.pImageParentOverride = NULL;
2123
2124 while (pCurrImage && rc == VERR_VD_BLOCK_FREE)
2125 {
2126 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2127 uOffset, cbThisRead,
2128 pIoCtx, &cbThisRead);
2129 if (cImagesToProcess == 1)
2130 break;
2131 else if (cImagesToProcess > 0)
2132 cImagesToProcess--;
2133
2134 if (rc == VERR_VD_BLOCK_FREE)
2135 pCurrImage = pCurrImage->pPrev;
2136 }
2137 }
2138 }
2139
2140 /* The task state will be updated on success already, don't do it here!. */
2141 if (rc == VERR_VD_BLOCK_FREE)
2142 {
2143 /* No image in the chain contains the data for the block. */
2144 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisRead); Assert(cbThisRead == (uint32_t)cbThisRead);
2145
2146 /* Fill the free space with 0 if we are told to do so
2147 * or a previous read returned valid data. */
2148 if (pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS)
2149 vdIoCtxSet(pIoCtx, '\0', cbThisRead);
2150 else
2151 pIoCtx->Req.Io.cbBufClear += cbThisRead;
2152
2153 if (pIoCtx->Req.Io.pImageCur->uOpenFlags & VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS)
2154 rc = VINF_VD_NEW_ZEROED_BLOCK;
2155 else
2156 rc = VINF_SUCCESS;
2157 }
2158 else if (rc == VERR_VD_IOCTX_HALT)
2159 {
2160 uOffset += cbThisRead;
2161 cbToRead -= cbThisRead;
2162 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2163 }
2164 else if ( RT_SUCCESS(rc)
2165 || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2166 {
2167 /* First not free block, fill the space before with 0. */
2168 if ( pIoCtx->Req.Io.cbBufClear
2169 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2170 {
2171 RTSGBUF SgBuf;
2172 RTSgBufClone(&SgBuf, &pIoCtx->Req.Io.SgBuf);
2173 RTSgBufReset(&SgBuf);
2174 RTSgBufSet(&SgBuf, 0, pIoCtx->Req.Io.cbBufClear);
2175 pIoCtx->Req.Io.cbBufClear = 0;
2176 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2177 }
2178 rc = VINF_SUCCESS;
2179 }
2180
2181 if (RT_FAILURE(rc))
2182 break;
2183
2184 cbToRead -= cbThisRead;
2185 uOffset += cbThisRead;
2186 pCurrImage = pIoCtx->Req.Io.pImageStart; /* Start with the highest image in the chain. */
2187 } while (cbToRead != 0 && RT_SUCCESS(rc));
2188
2189 if ( rc == VERR_VD_NOT_ENOUGH_METADATA
2190 || rc == VERR_VD_IOCTX_HALT)
2191 {
2192 /* Save the current state. */
2193 pIoCtx->Req.Io.uOffset = uOffset;
2194 pIoCtx->Req.Io.cbTransfer = cbToRead;
2195 pIoCtx->Req.Io.pImageCur = pCurrImage ? pCurrImage : pIoCtx->Req.Io.pImageStart;
2196 }
2197
2198 return (!(pIoCtx->fFlags & VDIOCTX_FLAGS_ZERO_FREE_BLOCKS))
2199 ? VERR_VD_BLOCK_FREE
2200 : rc;
2201}
2202
2203/**
2204 * internal: parent image read wrapper for compacting.
2205 */
2206static DECLCALLBACK(int) vdParentRead(void *pvUser, uint64_t uOffset, void *pvBuf,
2207 size_t cbRead)
2208{
2209 PVDPARENTSTATEDESC pParentState = (PVDPARENTSTATEDESC)pvUser;
2210
2211 /** @todo
2212 * Only used for compaction so far which is not possible to mix with async I/O.
2213 * Needs to be changed if we want to support online compaction of images.
2214 */
2215 bool fLocked = ASMAtomicXchgBool(&pParentState->pDisk->fLocked, true);
2216 AssertMsgReturn(!fLocked,
2217 ("Calling synchronous parent read while another thread holds the disk lock\n"),
2218 VERR_VD_INVALID_STATE);
2219
2220 /* Fake an I/O context. */
2221 RTSGSEG Segment;
2222 RTSGBUF SgBuf;
2223 VDIOCTX IoCtx;
2224
2225 Segment.pvSeg = pvBuf;
2226 Segment.cbSeg = cbRead;
2227 RTSgBufInit(&SgBuf, &Segment, 1);
2228 vdIoCtxInit(&IoCtx, pParentState->pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pParentState->pImage,
2229 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
2230 int rc = vdReadHelperAsync(&IoCtx);
2231 ASMAtomicXchgBool(&pParentState->pDisk->fLocked, false);
2232 return rc;
2233}
2234
2235/**
2236 * Extended version of vdReadHelper(), implementing certain optimizations
2237 * for image cloning.
2238 *
2239 * @returns VBox status code.
2240 * @param pDisk The disk to read from.
2241 * @param pImage The image to start reading from.
2242 * @param pImageParentOverride The parent image to read from
2243 * if the starting image returns a free block.
2244 * If NULL is passed the real parent of the image
2245 * in the chain is used.
2246 * @param uOffset Offset in the disk to start reading from.
2247 * @param pvBuf Where to store the read data.
2248 * @param cbRead How much to read.
2249 * @param fZeroFreeBlocks Flag whether free blocks should be zeroed.
2250 * If false and no image has data for sepcified
2251 * range VERR_VD_BLOCK_FREE is returned.
2252 * Note that unallocated blocks are still zeroed
2253 * if at least one image has valid data for a part
2254 * of the range.
2255 * @param fUpdateCache Flag whether to update the attached cache if
2256 * available.
2257 * @param cImagesRead Number of images in the chain to read until
2258 * the read is cut off. A value of 0 disables the cut off.
2259 */
2260static int vdReadHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage, PVDIMAGE pImageParentOverride,
2261 uint64_t uOffset, void *pvBuf, size_t cbRead,
2262 bool fZeroFreeBlocks, bool fUpdateCache, unsigned cImagesRead)
2263{
2264 int rc = VINF_SUCCESS;
2265 uint32_t fFlags = VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2266 RTSGSEG Segment;
2267 RTSGBUF SgBuf;
2268 VDIOCTX IoCtx;
2269 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2270
2271 rc = RTSemEventCreate(&hEventComplete);
2272 if (RT_FAILURE(rc))
2273 return rc;
2274
2275 if (fZeroFreeBlocks)
2276 fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2277 if (fUpdateCache)
2278 fFlags |= VDIOCTX_FLAGS_READ_UPDATE_CACHE;
2279
2280 Segment.pvSeg = pvBuf;
2281 Segment.cbSeg = cbRead;
2282 RTSgBufInit(&SgBuf, &Segment, 1);
2283 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, uOffset, cbRead, pImage, &SgBuf,
2284 NULL, vdReadHelperAsync, fFlags);
2285
2286 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2287 IoCtx.Req.Io.cImagesRead = cImagesRead;
2288 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2289 IoCtx.Type.Root.pvUser1 = pDisk;
2290 IoCtx.Type.Root.pvUser2 = hEventComplete;
2291 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2292 RTSemEventDestroy(hEventComplete);
2293 return rc;
2294}
2295
2296/**
2297 * internal: read the specified amount of data in whatever blocks the backend
2298 * will give us.
2299 */
2300static int vdReadHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2301 void *pvBuf, size_t cbRead, bool fUpdateCache)
2302{
2303 return vdReadHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbRead,
2304 true /* fZeroFreeBlocks */, fUpdateCache, 0);
2305}
2306
2307/**
2308 * internal: mark the disk as not modified.
2309 */
2310static void vdResetModifiedFlag(PVBOXHDD pDisk)
2311{
2312 if (pDisk->uModified & VD_IMAGE_MODIFIED_FLAG)
2313 {
2314 /* generate new last-modified uuid */
2315 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2316 {
2317 RTUUID Uuid;
2318
2319 RTUuidCreate(&Uuid);
2320 pDisk->pLast->Backend->pfnSetModificationUuid(pDisk->pLast->pBackendData,
2321 &Uuid);
2322
2323 if (pDisk->pCache)
2324 pDisk->pCache->Backend->pfnSetModificationUuid(pDisk->pCache->pBackendData,
2325 &Uuid);
2326 }
2327
2328 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FLAG;
2329 }
2330}
2331
2332/**
2333 * internal: mark the disk as modified.
2334 */
2335static void vdSetModifiedFlag(PVBOXHDD pDisk)
2336{
2337 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2338 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2339 {
2340 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2341
2342 /* First modify, so create a UUID and ensure it's written to disk. */
2343 vdResetModifiedFlag(pDisk);
2344
2345 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2346 {
2347 VDIOCTX IoCtx;
2348 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, NULL,
2349 NULL, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2350 pDisk->pLast->Backend->pfnFlush(pDisk->pLast->pBackendData, &IoCtx);
2351 }
2352 }
2353}
2354
2355/**
2356 * internal: write buffer to the image, taking care of block boundaries and
2357 * write optimizations.
2358 */
2359static int vdWriteHelperEx(PVBOXHDD pDisk, PVDIMAGE pImage,
2360 PVDIMAGE pImageParentOverride, uint64_t uOffset,
2361 const void *pvBuf, size_t cbWrite,
2362 uint32_t fFlags, unsigned cImagesRead)
2363{
2364 int rc = VINF_SUCCESS;
2365 RTSGSEG Segment;
2366 RTSGBUF SgBuf;
2367 VDIOCTX IoCtx;
2368 RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;
2369
2370 rc = RTSemEventCreate(&hEventComplete);
2371 if (RT_FAILURE(rc))
2372 return rc;
2373
2374 fFlags |= VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE;
2375
2376 Segment.pvSeg = (void *)pvBuf;
2377 Segment.cbSeg = cbWrite;
2378 RTSgBufInit(&SgBuf, &Segment, 1);
2379 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_WRITE, uOffset, cbWrite, pImage, &SgBuf,
2380 NULL, vdWriteHelperAsync, fFlags);
2381
2382 IoCtx.Req.Io.pImageParentOverride = pImageParentOverride;
2383 IoCtx.Req.Io.cImagesRead = cImagesRead;
2384 IoCtx.pIoCtxParent = NULL;
2385 IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
2386 IoCtx.Type.Root.pvUser1 = pDisk;
2387 IoCtx.Type.Root.pvUser2 = hEventComplete;
2388 if (RT_SUCCESS(rc))
2389 rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);
2390
2391 RTSemEventDestroy(hEventComplete);
2392 return rc;
2393}
2394
2395/**
2396 * internal: write buffer to the image, taking care of block boundaries and
2397 * write optimizations.
2398 */
2399static int vdWriteHelper(PVBOXHDD pDisk, PVDIMAGE pImage, uint64_t uOffset,
2400 const void *pvBuf, size_t cbWrite, uint32_t fFlags)
2401{
2402 return vdWriteHelperEx(pDisk, pImage, NULL, uOffset, pvBuf, cbWrite,
2403 fFlags, 0);
2404}
2405
2406/**
2407 * Internal: Copies the content of one disk to another one applying optimizations
2408 * to speed up the copy process if possible.
2409 */
2410static int vdCopyHelper(PVBOXHDD pDiskFrom, PVDIMAGE pImageFrom, PVBOXHDD pDiskTo,
2411 uint64_t cbSize, unsigned cImagesFromRead, unsigned cImagesToRead,
2412 bool fSuppressRedundantIo, PVDINTERFACEPROGRESS pIfProgress,
2413 PVDINTERFACEPROGRESS pDstIfProgress)
2414{
2415 int rc = VINF_SUCCESS;
2416 int rc2;
2417 uint64_t uOffset = 0;
2418 uint64_t cbRemaining = cbSize;
2419 void *pvBuf = NULL;
2420 bool fLockReadFrom = false;
2421 bool fLockWriteTo = false;
2422 bool fBlockwiseCopy = false;
2423 unsigned uProgressOld = 0;
2424
2425 LogFlowFunc(("pDiskFrom=%#p pImageFrom=%#p pDiskTo=%#p cbSize=%llu cImagesFromRead=%u cImagesToRead=%u fSuppressRedundantIo=%RTbool pIfProgress=%#p pDstIfProgress=%#p\n",
2426 pDiskFrom, pImageFrom, pDiskTo, cbSize, cImagesFromRead, cImagesToRead, fSuppressRedundantIo, pDstIfProgress, pDstIfProgress));
2427
2428 if ( (fSuppressRedundantIo || (cImagesFromRead > 0))
2429 && RTListIsEmpty(&pDiskFrom->ListFilterChainRead))
2430 fBlockwiseCopy = true;
2431
2432 /* Allocate tmp buffer. */
2433 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
2434 if (!pvBuf)
2435 return rc;
2436
2437 do
2438 {
2439 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
2440
2441 /* Note that we don't attempt to synchronize cross-disk accesses.
2442 * It wouldn't be very difficult to do, just the lock order would
2443 * need to be defined somehow to prevent deadlocks. Postpone such
2444 * magic as there is no use case for this. */
2445
2446 rc2 = vdThreadStartRead(pDiskFrom);
2447 AssertRC(rc2);
2448 fLockReadFrom = true;
2449
2450 if (fBlockwiseCopy)
2451 {
2452 RTSGSEG SegmentBuf;
2453 RTSGBUF SgBuf;
2454 VDIOCTX IoCtx;
2455
2456 SegmentBuf.pvSeg = pvBuf;
2457 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
2458 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
2459 vdIoCtxInit(&IoCtx, pDiskFrom, VDIOCTXTXDIR_READ, 0, 0, NULL,
2460 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
2461
2462 /* Read the source data. */
2463 rc = pImageFrom->Backend->pfnRead(pImageFrom->pBackendData,
2464 uOffset, cbThisRead, &IoCtx,
2465 &cbThisRead);
2466
2467 if ( rc == VERR_VD_BLOCK_FREE
2468 && cImagesFromRead != 1)
2469 {
2470 unsigned cImagesToProcess = cImagesFromRead;
2471
2472 for (PVDIMAGE pCurrImage = pImageFrom->pPrev;
2473 pCurrImage != NULL && rc == VERR_VD_BLOCK_FREE;
2474 pCurrImage = pCurrImage->pPrev)
2475 {
2476 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
2477 uOffset, cbThisRead,
2478 &IoCtx, &cbThisRead);
2479 if (cImagesToProcess == 1)
2480 break;
2481 else if (cImagesToProcess > 0)
2482 cImagesToProcess--;
2483 }
2484 }
2485 }
2486 else
2487 rc = vdReadHelper(pDiskFrom, pImageFrom, uOffset, pvBuf, cbThisRead,
2488 false /* fUpdateCache */);
2489
2490 if (RT_FAILURE(rc) && rc != VERR_VD_BLOCK_FREE)
2491 break;
2492
2493 rc2 = vdThreadFinishRead(pDiskFrom);
2494 AssertRC(rc2);
2495 fLockReadFrom = false;
2496
2497 if (rc != VERR_VD_BLOCK_FREE)
2498 {
2499 rc2 = vdThreadStartWrite(pDiskTo);
2500 AssertRC(rc2);
2501 fLockWriteTo = true;
2502
2503 /* Only do collapsed I/O if we are copying the data blockwise. */
2504 rc = vdWriteHelperEx(pDiskTo, pDiskTo->pLast, NULL, uOffset, pvBuf,
2505 cbThisRead, VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG /* fFlags */,
2506 fBlockwiseCopy ? cImagesToRead : 0);
2507 if (RT_FAILURE(rc))
2508 break;
2509
2510 rc2 = vdThreadFinishWrite(pDiskTo);
2511 AssertRC(rc2);
2512 fLockWriteTo = false;
2513 }
2514 else /* Don't propagate the error to the outside */
2515 rc = VINF_SUCCESS;
2516
2517 uOffset += cbThisRead;
2518 cbRemaining -= cbThisRead;
2519
2520 unsigned uProgressNew = uOffset * 99 / cbSize;
2521 if (uProgressNew != uProgressOld)
2522 {
2523 uProgressOld = uProgressNew;
2524
2525 if (pIfProgress && pIfProgress->pfnProgress)
2526 {
2527 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
2528 uProgressOld);
2529 if (RT_FAILURE(rc))
2530 break;
2531 }
2532 if (pDstIfProgress && pDstIfProgress->pfnProgress)
2533 {
2534 rc = pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser,
2535 uProgressOld);
2536 if (RT_FAILURE(rc))
2537 break;
2538 }
2539 }
2540 } while (uOffset < cbSize);
2541
2542 RTMemFree(pvBuf);
2543
2544 if (fLockReadFrom)
2545 {
2546 rc2 = vdThreadFinishRead(pDiskFrom);
2547 AssertRC(rc2);
2548 }
2549
2550 if (fLockWriteTo)
2551 {
2552 rc2 = vdThreadFinishWrite(pDiskTo);
2553 AssertRC(rc2);
2554 }
2555
2556 LogFlowFunc(("returns rc=%Rrc\n", rc));
2557 return rc;
2558}
2559
2560/**
2561 * Flush helper async version.
2562 */
2563static DECLCALLBACK(int) vdSetModifiedHelperAsync(PVDIOCTX pIoCtx)
2564{
2565 int rc = VINF_SUCCESS;
2566 PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
2567
2568 rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
2569 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2570 rc = VINF_SUCCESS;
2571
2572 return rc;
2573}
2574
2575/**
2576 * internal: mark the disk as modified - async version.
2577 */
2578static int vdSetModifiedFlagAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx)
2579{
2580 int rc = VINF_SUCCESS;
2581
2582 VD_IS_LOCKED(pDisk);
2583
2584 pDisk->uModified |= VD_IMAGE_MODIFIED_FLAG;
2585 if (pDisk->uModified & VD_IMAGE_MODIFIED_FIRST)
2586 {
2587 rc = vdIoCtxLockDisk(pDisk, pIoCtx);
2588 if (RT_SUCCESS(rc))
2589 {
2590 pDisk->uModified &= ~VD_IMAGE_MODIFIED_FIRST;
2591
2592 /* First modify, so create a UUID and ensure it's written to disk. */
2593 vdResetModifiedFlag(pDisk);
2594
2595 if (!(pDisk->uModified & VD_IMAGE_MODIFIED_DISABLE_UUID_UPDATE))
2596 {
2597 PVDIOCTX pIoCtxFlush = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_FLUSH,
2598 0, 0, pDisk->pLast,
2599 NULL, pIoCtx, 0, 0, NULL,
2600 vdSetModifiedHelperAsync);
2601
2602 if (pIoCtxFlush)
2603 {
2604 rc = vdIoCtxProcessLocked(pIoCtxFlush);
2605 if (rc == VINF_VD_ASYNC_IO_FINISHED)
2606 {
2607 vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessDeferredReqs */);
2608 vdIoCtxFree(pDisk, pIoCtxFlush);
2609 }
2610 else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2611 {
2612 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
2613 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2614 }
2615 else /* Another error */
2616 vdIoCtxFree(pDisk, pIoCtxFlush);
2617 }
2618 else
2619 rc = VERR_NO_MEMORY;
2620 }
2621 }
2622 }
2623
2624 return rc;
2625}
2626
2627static DECLCALLBACK(int) vdWriteHelperCommitAsync(PVDIOCTX pIoCtx)
2628{
2629 int rc = VINF_SUCCESS;
2630 PVDIMAGE pImage = pIoCtx->Req.Io.pImageStart;
2631 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2632 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2633 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2634
2635 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2636 rc = pImage->Backend->pfnWrite(pImage->pBackendData,
2637 pIoCtx->Req.Io.uOffset - cbPreRead,
2638 cbPreRead + cbThisWrite + cbPostRead,
2639 pIoCtx, NULL, &cbPreRead, &cbPostRead, 0);
2640 Assert(rc != VERR_VD_BLOCK_FREE);
2641 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPreRead == 0);
2642 Assert(rc == VERR_VD_NOT_ENOUGH_METADATA || cbPostRead == 0);
2643 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
2644 rc = VINF_SUCCESS;
2645 else if (rc == VERR_VD_IOCTX_HALT)
2646 {
2647 pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
2648 rc = VINF_SUCCESS;
2649 }
2650
2651 LogFlowFunc(("returns rc=%Rrc\n", rc));
2652 return rc;
2653}
2654
/**
 * Child write context worker: compares the block content (read in the
 * previous stage) against the data to be written. If nothing would change,
 * the write is skipped entirely; otherwise the full block is assembled
 * (pre-read + new data + copied/zero-filled tail) and handed to the
 * commit stage.
 *
 * @returns VBox status code, VINF_VD_ASYNC_IO_FINISHED when the block is
 *          unchanged and no write is necessary.
 * @param   pIoCtx    The child I/O context holding the block buffer.
 */
static DECLCALLBACK(int) vdWriteHelperOptimizedCmpAndWriteAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    size_t cbThisWrite = 0;
    size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
    size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
    size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;
    PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pIoCtxParent);
    Assert(!pIoCtxParent->pIoCtxParent);
    Assert(!pIoCtx->Req.Io.cbTransferLeft && !pIoCtx->cMetaTransfersPending);

    vdIoCtxChildReset(pIoCtx);
    cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    /* Skip the pre-read part; comparison starts at the write position. */
    RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);

    /* Check if the write would modify anything in this block. */
    if (!RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &pIoCtxParent->Req.Io.SgBuf, cbThisWrite))
    {
        RTSGBUF SgBufSrcTmp;

        /* Also compare the parent data that would overwrite the tail of the block. */
        RTSgBufClone(&SgBufSrcTmp, &pIoCtxParent->Req.Io.SgBuf);
        RTSgBufAdvance(&SgBufSrcTmp, cbThisWrite);
        RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbThisWrite);

        if (!cbWriteCopy || !RTSgBufCmp(&pIoCtx->Req.Io.SgBuf, &SgBufSrcTmp, cbWriteCopy))
        {
            /* Block is completely unchanged, so no need to write anything. */
            LogFlowFunc(("Block didn't changed\n"));
            ASMAtomicWriteU32(&pIoCtx->Req.Io.cbTransferLeft, 0);
            /* Advance the parent buffer as if the data had been written. */
            RTSgBufAdvance(&pIoCtxParent->Req.Io.SgBuf, cbThisWrite);
            return VINF_VD_ASYNC_IO_FINISHED;
        }
    }

    /* Copy the data to the right place in the buffer. */
    RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbPreRead);
    vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);

    /* Handle the data that goes after the write to fill the block. */
    if (cbPostRead)
    {
        /* Now assemble the remaining data. */
        if (cbWriteCopy)
        {
            /*
             * The S/G buffer of the parent needs to be cloned because
             * it is not allowed to modify the state.
             */
            RTSGBUF SgBufParentTmp;

            RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
            RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
        }

        /* Zero out the remainder of this block. Will never be visible, as this
         * is beyond the limit of the image. */
        if (cbFill)
        {
            RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbReadImage);
            vdIoCtxSet(pIoCtx, '\0', cbFill);
        }
    }

    /* Write the full block to the virtual disk. */
    RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
    pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;

    return rc;
}
2731
2732static DECLCALLBACK(int) vdWriteHelperOptimizedPreReadAsync(PVDIOCTX pIoCtx)
2733{
2734 int rc = VINF_SUCCESS;
2735
2736 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2737
2738 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2739
2740 if ( pIoCtx->Req.Io.cbTransferLeft
2741 && !pIoCtx->cDataTransfersPending)
2742 rc = vdReadHelperAsync(pIoCtx);
2743
2744 if ( ( RT_SUCCESS(rc)
2745 || (rc == VERR_VD_ASYNC_IO_IN_PROGRESS))
2746 && ( pIoCtx->Req.Io.cbTransferLeft
2747 || pIoCtx->cMetaTransfersPending))
2748 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2749 else
2750 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedCmpAndWriteAsync;
2751
2752 return rc;
2753}
2754
2755/**
2756 * internal: write a complete block (only used for diff images), taking the
2757 * remaining data from parent images. This implementation optimizes out writes
2758 * that do not change the data relative to the state as of the parent images.
2759 * All backends which support differential/growing images support this - async version.
2760 */
2761static DECLCALLBACK(int) vdWriteHelperOptimizedAsync(PVDIOCTX pIoCtx)
2762{
2763 PVBOXHDD pDisk = pIoCtx->pDisk;
2764 uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
2765 size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
2766 size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
2767 size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
2768 size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
2769 size_t cbFill = 0;
2770 size_t cbWriteCopy = 0;
2771 size_t cbReadImage = 0;
2772
2773 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2774
2775 AssertPtr(pIoCtx->pIoCtxParent);
2776 Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);
2777
2778 if (cbPostRead)
2779 {
2780 /* Figure out how much we cannot read from the image, because
2781 * the last block to write might exceed the nominal size of the
2782 * image for technical reasons. */
2783 if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
2784 cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;
2785
2786 /* If we have data to be written, use that instead of reading
2787 * data from the image. */
2788 if (cbWrite > cbThisWrite)
2789 cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
2790
2791 /* The rest must be read from the image. */
2792 cbReadImage = cbPostRead - cbWriteCopy - cbFill;
2793 }
2794
2795 pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
2796 pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
2797 pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;
2798
2799 /* Read the entire data of the block so that we can compare whether it will
2800 * be modified by the write or not. */
2801 size_t cbTmp = cbPreRead + cbThisWrite + cbPostRead - cbFill; Assert(cbTmp == (uint32_t)cbTmp);
2802 pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbTmp;
2803 pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
2804 pIoCtx->Req.Io.uOffset -= cbPreRead;
2805
2806 /* Next step */
2807 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperOptimizedPreReadAsync;
2808 return VINF_SUCCESS;
2809}
2810
2811static DECLCALLBACK(int) vdWriteHelperStandardReadImageAsync(PVDIOCTX pIoCtx)
2812{
2813 int rc = VINF_SUCCESS;
2814
2815 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2816
2817 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2818
2819 if ( pIoCtx->Req.Io.cbTransferLeft
2820 && !pIoCtx->cDataTransfersPending)
2821 rc = vdReadHelperAsync(pIoCtx);
2822
2823 if ( RT_SUCCESS(rc)
2824 && ( pIoCtx->Req.Io.cbTransferLeft
2825 || pIoCtx->cMetaTransfersPending))
2826 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2827 else
2828 {
2829 size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
2830
2831 /* Zero out the remainder of this block. Will never be visible, as this
2832 * is beyond the limit of the image. */
2833 if (cbFill)
2834 vdIoCtxSet(pIoCtx, '\0', cbFill);
2835
2836 /* Write the full block to the virtual disk. */
2837 RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
2838
2839 vdIoCtxChildReset(pIoCtx);
2840 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
2841 }
2842
2843 return rc;
2844}
2845
/**
 * Child write context worker: assembles the block buffer from the parent's
 * write data and, if needed, schedules a read of the remaining gap from the
 * image before the block is committed.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The child I/O context holding the block buffer.
 */
static DECLCALLBACK(int) vdWriteHelperStandardAssemble(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Copy the write payload from the parent into the block buffer. */
    vdIoCtxCopy(pIoCtx, pIoCtxParent, cbThisWrite);
    if (cbPostRead)
    {
        size_t cbFill = pIoCtx->Type.Child.Write.Optimized.cbFill;
        size_t cbWriteCopy = pIoCtx->Type.Child.Write.Optimized.cbWriteCopy;
        size_t cbReadImage = pIoCtx->Type.Child.Write.Optimized.cbReadImage;

        /* Now assemble the remaining data. */
        if (cbWriteCopy)
        {
            /*
             * The S/G buffer of the parent needs to be cloned because
             * it is not allowed to modify the state.
             */
            RTSGBUF SgBufParentTmp;

            RTSgBufClone(&SgBufParentTmp, &pIoCtxParent->Req.Io.SgBuf);
            RTSgBufCopy(&pIoCtx->Req.Io.SgBuf, &SgBufParentTmp, cbWriteCopy);
        }

        if (cbReadImage)
        {
            /* Read remaining data. */
            pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardReadImageAsync;

            /* Read the data that goes before the write to fill the block. */
            pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbReadImage; Assert(cbReadImage == (uint32_t)cbReadImage);
            pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
            pIoCtx->Req.Io.uOffset += cbWriteCopy;
        }
        else
        {
            /* Zero out the remainder of this block. Will never be visible, as this
             * is beyond the limit of the image. */
            if (cbFill)
                vdIoCtxSet(pIoCtx, '\0', cbFill);

            /* Write the full block to the virtual disk. */
            RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
            vdIoCtxChildReset(pIoCtx);
            pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
        }
    }
    else
    {
        /* Write the full block to the virtual disk. */
        RTSgBufReset(&pIoCtx->Req.Io.SgBuf);
        vdIoCtxChildReset(pIoCtx);
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperCommitAsync;
    }

    return rc;
}
2908
2909static DECLCALLBACK(int) vdWriteHelperStandardPreReadAsync(PVDIOCTX pIoCtx)
2910{
2911 int rc = VINF_SUCCESS;
2912
2913 LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));
2914
2915 pIoCtx->fFlags |= VDIOCTX_FLAGS_ZERO_FREE_BLOCKS;
2916
2917 if ( pIoCtx->Req.Io.cbTransferLeft
2918 && !pIoCtx->cDataTransfersPending)
2919 rc = vdReadHelperAsync(pIoCtx);
2920
2921 if ( RT_SUCCESS(rc)
2922 && ( pIoCtx->Req.Io.cbTransferLeft
2923 || pIoCtx->cMetaTransfersPending))
2924 rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
2925 else
2926 pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;
2927
2928 return rc;
2929}
2930
/**
 * Entry point for a standard (non-optimized) allocating block write: computes
 * how the post-read area splits into data copied from the write, data read
 * from the image and zero fill, then schedules either the pre-read stage or
 * directly the assemble stage.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The child I/O context for the allocating write.
 */
static DECLCALLBACK(int) vdWriteHelperStandardAsync(PVDIOCTX pIoCtx)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    uint64_t uOffset = pIoCtx->Type.Child.uOffsetSaved;
    size_t cbThisWrite = pIoCtx->Type.Child.cbTransferParent;
    size_t cbPreRead = pIoCtx->Type.Child.cbPreRead;
    size_t cbPostRead = pIoCtx->Type.Child.cbPostRead;
    size_t cbWrite = pIoCtx->Type.Child.cbWriteParent;
    size_t cbFill = 0;
    size_t cbWriteCopy = 0;
    size_t cbReadImage = 0;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pIoCtx->pIoCtxParent);
    Assert(!pIoCtx->pIoCtxParent->pIoCtxParent);

    /* Calculate the amount of data to read that goes after the write to fill the block. */
    if (cbPostRead)
    {
        /* If we have data to be written, use that instead of reading
         * data from the image. */
        if (cbWrite > cbThisWrite)
            cbWriteCopy = RT_MIN(cbWrite - cbThisWrite, cbPostRead);
        else
            cbWriteCopy = 0;

        /* Figure out how much we cannot read from the image, because
         * the last block to write might exceed the nominal size of the
         * image for technical reasons. */
        if (uOffset + cbThisWrite + cbPostRead > pDisk->cbSize)
            cbFill = uOffset + cbThisWrite + cbPostRead - pDisk->cbSize;

        /* The rest must be read from the image. */
        cbReadImage = cbPostRead - cbWriteCopy - cbFill;
    }

    /* Remember the split for the assemble stage. */
    pIoCtx->Type.Child.Write.Optimized.cbFill = cbFill;
    pIoCtx->Type.Child.Write.Optimized.cbWriteCopy = cbWriteCopy;
    pIoCtx->Type.Child.Write.Optimized.cbReadImage = cbReadImage;

    /* Next step */
    if (cbPreRead)
    {
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardPreReadAsync;

        /* Read the data that goes before the write to fill the block. */
        pIoCtx->Req.Io.cbTransferLeft = (uint32_t)cbPreRead; Assert(cbPreRead == (uint32_t)cbPreRead);
        pIoCtx->Req.Io.cbTransfer = pIoCtx->Req.Io.cbTransferLeft;
        pIoCtx->Req.Io.uOffset -= cbPreRead;
    }
    else
        pIoCtx->pfnIoCtxTransferNext = vdWriteHelperStandardAssemble;

    return VINF_SUCCESS;
}
2987
/**
 * internal: write buffer to the image, taking care of block boundaries and
 * write optimizations - async version.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if parts of the write are still pending.
 * @param   pIoCtx    The I/O context describing the write (offset, size, buffer).
 */
static DECLCALLBACK(int) vdWriteHelperAsync(PVDIOCTX pIoCtx)
{
    int rc;
    size_t cbWrite = pIoCtx->Req.Io.cbTransfer;
    uint64_t uOffset = pIoCtx->Req.Io.uOffset;
    PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    unsigned fWrite;
    size_t cbThisWrite;
    size_t cbPreRead, cbPostRead;

    /* Apply write filter chain here if it was not done already. */
    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_WRITE_FILTER_APPLIED))
    {
        rc = vdFilterChainApplyWrite(pDisk, uOffset, cbWrite, pIoCtx);
        if (RT_FAILURE(rc))
            return rc;
        pIoCtx->fFlags |= VDIOCTX_FLAGS_WRITE_FILTER_APPLIED;
    }

    /* Set the modified flag in the image unless suppressed by the caller. */
    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_DONT_SET_MODIFIED_FLAG))
    {
        rc = vdSetModifiedFlagAsync(pDisk, pIoCtx);
        if (RT_FAILURE(rc)) /* Includes I/O in progress. */
            return rc;
    }

    /* Record the range as allocated in the discard bookkeeping. */
    rc = vdDiscardSetRangeAllocated(pDisk, uOffset, cbWrite);
    if (RT_FAILURE(rc))
        return rc;

    /* Loop until all written. */
    do
    {
        /* Try to write the possibly partial block to the last opened image.
         * This works when the block is already allocated in this image or
         * if it is a full-block write (and allocation isn't suppressed below).
         * For image formats which don't support zero blocks, it's beneficial
         * to avoid unnecessarily allocating unchanged blocks. This prevents
         * unwanted expanding of images. VMDK is an example. */
        cbThisWrite = cbWrite;

        /*
         * Check whether there is a full block write in progress which was not allocated.
         * Defer I/O if the range interferes.
         */
        if (   pDisk->pIoCtxLockOwner != NIL_VDIOCTX
            && uOffset >= pDisk->uOffsetStartLocked
            && uOffset < pDisk->uOffsetEndLocked)
        {
            Log(("Interferring write while allocating a new block => deferring write\n"));
            vdIoCtxDefer(pDisk, pIoCtx);
            rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
            break;
        }

        fWrite = (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
               ? 0 : VD_WRITE_NO_ALLOC;
        rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset, cbThisWrite,
                                       pIoCtx, &cbThisWrite, &cbPreRead, &cbPostRead,
                                       fWrite);
        if (rc == VERR_VD_BLOCK_FREE)
        {
            /* Lock the disk .*/
            rc = vdIoCtxLockDisk(pDisk, pIoCtx);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Allocate segment and buffer in one go.
                 * A bit hackish but avoids the need to allocate memory twice.
                 */
                PRTSGBUF pTmp = (PRTSGBUF)RTMemAlloc(cbPreRead + cbThisWrite + cbPostRead + sizeof(RTSGSEG) + sizeof(RTSGBUF));
                AssertBreakStmt(pTmp, rc = VERR_NO_MEMORY);
                PRTSGSEG pSeg = (PRTSGSEG)(pTmp + 1);

                /* Layout: [RTSGBUF][RTSGSEG][data buffer of cbPreRead+cbThisWrite+cbPostRead]. */
                pSeg->pvSeg = pSeg + 1;
                pSeg->cbSeg = cbPreRead + cbThisWrite + cbPostRead;
                RTSgBufInit(pTmp, pSeg, 1);

                PVDIOCTX pIoCtxWrite = vdIoCtxChildAlloc(pDisk, VDIOCTXTXDIR_WRITE,
                                                         uOffset, pSeg->cbSeg, pImage,
                                                         pTmp,
                                                         pIoCtx, cbThisWrite,
                                                         cbWrite,
                                                         pTmp,
                                                         (pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME)
                                                         ? vdWriteHelperStandardAsync
                                                         : vdWriteHelperOptimizedAsync);
                if (!VALID_PTR(pIoCtxWrite))
                {
                    /* NOTE(review): pTmp was allocated with RTMemAlloc but is freed
                     * with RTMemTmpFree here - looks like an API mismatch, confirm. */
                    RTMemTmpFree(pTmp);
                    rc = VERR_NO_MEMORY;
                    break;
                }

                LogFlowFunc(("Disk is growing because of pIoCtx=%#p pIoCtxWrite=%#p\n",
                             pIoCtx, pIoCtxWrite));

                /* Save the current range for the growing operation to check for intersecting requests later. */
                pDisk->uOffsetStartLocked = uOffset - cbPreRead;
                pDisk->uOffsetEndLocked = uOffset + cbThisWrite + cbPostRead;

                pIoCtxWrite->Type.Child.cbPreRead = cbPreRead;
                pIoCtxWrite->Type.Child.cbPostRead = cbPostRead;
                pIoCtxWrite->Req.Io.pImageParentOverride = pIoCtx->Req.Io.pImageParentOverride;

                /* Process the write request */
                rc = vdIoCtxProcessLocked(pIoCtxWrite);

                if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
                {
                    vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessDeferredReqs*/ );
                    vdIoCtxFree(pDisk, pIoCtxWrite);
                    break;
                }
                else if (   rc == VINF_VD_ASYNC_IO_FINISHED
                         && ASMAtomicCmpXchgBool(&pIoCtxWrite->fComplete, true, false))
                {
                    /* Child completed synchronously; account its bytes on the parent. */
                    LogFlow(("Child write request completed\n"));
                    Assert(pIoCtx->Req.Io.cbTransferLeft >= cbThisWrite);
                    Assert(cbThisWrite == (uint32_t)cbThisWrite);
                    rc = pIoCtxWrite->rcReq;
                    ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbThisWrite);
                    vdIoCtxUnlockDisk(pDisk, pIoCtx, false /* fProcessDeferredReqs*/ );
                    vdIoCtxFree(pDisk, pIoCtxWrite);
                }
                else
                {
                    /* Child still in flight: block the parent until completion. */
                    LogFlow(("Child write pending\n"));
                    ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
                    pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
                    rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
                    cbWrite -= cbThisWrite;
                    uOffset += cbThisWrite;
                    break;
                }
            }
            else
            {
                /* Disk already locked by someone else; retry later. */
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS;
                break;
            }
        }

        if (rc == VERR_VD_IOCTX_HALT)
        {
            cbWrite -= cbThisWrite;
            uOffset += cbThisWrite;
            pIoCtx->fFlags |= VDIOCTX_FLAGS_BLOCKED;
            break;
        }
        else if (rc == VERR_VD_NOT_ENOUGH_METADATA)
            break;

        cbWrite -= cbThisWrite;
        uOffset += cbThisWrite;
    } while (cbWrite != 0 && (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS));

    if (   rc == VERR_VD_ASYNC_IO_IN_PROGRESS
        || rc == VERR_VD_NOT_ENOUGH_METADATA
        || rc == VERR_VD_IOCTX_HALT)
    {
        /*
         * Tell the caller that we don't need to go back here because all
         * writes are initiated.
         */
        if (   !cbWrite
            && rc != VERR_VD_IOCTX_HALT)
            rc = VINF_SUCCESS;

        /* Persist the progress in the context for the next invocation. */
        pIoCtx->Req.Io.uOffset = uOffset;
        pIoCtx->Req.Io.cbTransfer = cbWrite;
    }

    return rc;
}
3168
/**
 * Flush helper async version.
 *
 * Locks the whole disk, flushes the current image and - if present - the
 * attached cache, then unlocks unless an async operation is still pending.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdFlushHelperAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDIMAGE pImage = pIoCtx->Req.Io.pImageCur;

    rc = vdIoCtxLockDisk(pDisk, pIoCtx);
    if (RT_SUCCESS(rc))
    {
        /* Mark the whole disk as locked. */
        pDisk->uOffsetStartLocked = 0;
        pDisk->uOffsetEndLocked = UINT64_C(0xffffffffffffffff);

        vdResetModifiedFlag(pDisk);
        rc = pImage->Backend->pfnFlush(pImage->pBackendData, pIoCtx);
        if (   (   RT_SUCCESS(rc)
                || rc == VERR_VD_ASYNC_IO_IN_PROGRESS
                || rc == VERR_VD_IOCTX_HALT)
            && pDisk->pCache)
        {
            /* Image flush succeeded or is pending - flush the cache too. */
            rc = pDisk->pCache->Backend->pfnFlush(pDisk->pCache->pBackendData, pIoCtx);
            if (   RT_SUCCESS(rc)
                || (   rc != VERR_VD_ASYNC_IO_IN_PROGRESS
                    && rc != VERR_VD_IOCTX_HALT))
                vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
            else if (rc != VERR_VD_IOCTX_HALT) /* Async flush still in flight: report success. */
                rc = VINF_SUCCESS;
        }
        else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            rc = VINF_SUCCESS;
        else if (rc != VERR_VD_IOCTX_HALT) /* Some other error. */
            vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessBlockedReqs */);
    }

    return rc;
}
3208
/**
 * Async discard helper - discards a whole block which is recorded in the block
 * tree.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardWholeBlockAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    PVDDISCARDBLOCK pBlock = pIoCtx->Req.Discard.pBlock;
    size_t cbPreAllocated, cbPostAllocated, cbActuallyDiscarded;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    AssertPtr(pBlock);

    rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                           pBlock->Core.Key, pBlock->cbDiscard,
                                           &cbPreAllocated, &cbPostAllocated,
                                           &cbActuallyDiscarded, NULL, 0);
    /* A recorded block spans a complete backend block, so the backend is
     * expected to discard it fully without pre/post allocated leftovers. */
    Assert(rc != VERR_VD_DISCARD_ALIGNMENT_NOT_MET);
    Assert(!cbPreAllocated);
    Assert(!cbPostAllocated);
    Assert(cbActuallyDiscarded == pBlock->cbDiscard || RT_FAILURE(rc));

    /* Remove the block on success. */
    if (   RT_SUCCESS(rc)
        || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    {
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); RT_NOREF1(pBlockRemove);

        /* Unaccount the block and free its bitmap and node. */
        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTListNodeRemove(&pBlock->NodeLru);
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
        pIoCtx->Req.Discard.pBlock = NULL; /* Safety precaution. */
        pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
        rc = VINF_SUCCESS; /* A pending async discard counts as success here. */
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3256
/**
 * Removes the least recently used blocks from the waiting list until
 * the new value is reached - version for async I/O.
 *
 * @returns VBox status code.
 * @param   pDisk              VD disk container.
 * @param   pIoCtx             The I/O context associated with this discard operation.
 * @param   cbDiscardingNew    How many bytes should be waiting on success.
 *                             The number of bytes waiting can be less.
 */
static int vdDiscardRemoveBlocksAsync(PVBOXHDD pDisk, PVDIOCTX pIoCtx, size_t cbDiscardingNew)
{
    int rc = VINF_SUCCESS;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;

    LogFlowFunc(("pDisk=%#p pDiscard=%#p cbDiscardingNew=%zu\n",
                 pDisk, pDiscard, cbDiscardingNew));

    while (pDiscard->cbDiscarding > cbDiscardingNew)
    {
        /* The least recently used block sits at the tail of the LRU list. */
        PVDDISCARDBLOCK pBlock = RTListGetLast(&pDiscard->ListLru, VDDISCARDBLOCK, NodeLru);

        Assert(!RTListIsEmpty(&pDiscard->ListLru));

        /* Go over the allocation bitmap and mark all discarded sectors as unused. */
        uint64_t offStart = pBlock->Core.Key;
        uint32_t idxStart = 0;
        size_t cbLeft = pBlock->cbDiscard;
        bool fAllocated = ASMBitTest(pBlock->pbmAllocated, idxStart);
        uint32_t cSectors = (uint32_t)(pBlock->cbDiscard / 512); /* one bit per 512 byte sector */

        while (cbLeft > 0)
        {
            int32_t idxEnd;
            size_t cbThis = cbLeft;

            if (fAllocated)
            {
                /* Check for the first unallocated bit. */
                idxEnd = ASMBitNextClear(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                {
                    cbThis = (idxEnd - idxStart) * 512;
                    fAllocated = false;
                }
            }
            else
            {
                /* Mark as unused and check for the first set bit. */
                idxEnd = ASMBitNextSet(pBlock->pbmAllocated, cSectors, idxStart);
                if (idxEnd != -1)
                    cbThis = (idxEnd - idxStart) * 512;

                rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                                       offStart, cbThis, NULL, NULL, &cbThis,
                                                       NULL, VD_DISCARD_MARK_UNUSED);
                if (   RT_FAILURE(rc)
                    && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
                    break;

                fAllocated = true;
            }

            idxStart = idxEnd;
            offStart += cbThis;
            cbLeft -= cbThis;
        }

        if (   RT_FAILURE(rc)
            && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            break;

        /* Drop the fully processed block from the tree and LRU list and free it. */
        PVDDISCARDBLOCK pBlockRemove = (PVDDISCARDBLOCK)RTAvlrU64RangeRemove(pDiscard->pTreeBlocks, pBlock->Core.Key);
        Assert(pBlockRemove == pBlock); NOREF(pBlockRemove);
        RTListNodeRemove(&pBlock->NodeLru);

        pDiscard->cbDiscarding -= pBlock->cbDiscard;
        RTMemFree(pBlock->pbmAllocated);
        RTMemFree(pBlock);
    }

    /* An in-flight async discard is not an error for the caller. */
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        rc = VINF_SUCCESS;

    Assert(RT_FAILURE(rc) || pDiscard->cbDiscarding <= cbDiscardingNew);

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3346
/**
 * Async discard helper - discards the current range if there is no matching
 * block in the tree.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardCurrentRangeAsync(PVDIOCTX pIoCtx)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;
    uint64_t offStart = pIoCtx->Req.Discard.offCur;
    size_t cbThisDiscard = pIoCtx->Req.Discard.cbThisDiscard;
    void *pbmAllocated = NULL;
    size_t cbPreAllocated, cbPostAllocated;
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* No block found, try to discard using the backend first. */
    rc = pDisk->pLast->Backend->pfnDiscard(pDisk->pLast->pBackendData, pIoCtx,
                                           offStart, cbThisDiscard, &cbPreAllocated,
                                           &cbPostAllocated, &cbThisDiscard,
                                           &pbmAllocated, 0);
    if (rc == VERR_VD_DISCARD_ALIGNMENT_NOT_MET)
    {
        /* Create new discard block. */
        PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTMemAllocZ(sizeof(VDDISCARDBLOCK));
        if (pBlock)
        {
            /* The block covers the requested range extended by the still
             * allocated data before and after it reported by the backend. */
            pBlock->Core.Key = offStart - cbPreAllocated;
            pBlock->Core.KeyLast = offStart + cbThisDiscard + cbPostAllocated - 1;
            pBlock->cbDiscard = cbPreAllocated + cbThisDiscard + cbPostAllocated;
            pBlock->pbmAllocated = pbmAllocated;
            bool fInserted = RTAvlrU64Insert(pDiscard->pTreeBlocks, &pBlock->Core);
            Assert(fInserted); NOREF(fInserted);

            /* New blocks are most recently used - prepend to the LRU list. */
            RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);
            pDiscard->cbDiscarding += pBlock->cbDiscard;

            Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
            pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
            pIoCtx->Req.Discard.offCur += cbThisDiscard;
            pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;

            /* Evict least recently used blocks when too much data is pending. */
            if (pDiscard->cbDiscarding > VD_DISCARD_REMOVE_THRESHOLD)
                rc = vdDiscardRemoveBlocksAsync(pDisk, pIoCtx, VD_DISCARD_REMOVE_THRESHOLD);
            else
                rc = VINF_SUCCESS;

            if (RT_SUCCESS(rc))
                pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync; /* Next part. */
        }
        else
        {
            RTMemFree(pbmAllocated);
            rc = VERR_NO_MEMORY;
        }
    }
    else if (   RT_SUCCESS(rc)
             || rc == VERR_VD_ASYNC_IO_IN_PROGRESS) /* Save state and advance to next range. */
    {
        Assert(pIoCtx->Req.Discard.cbDiscardLeft >= cbThisDiscard);
        pIoCtx->Req.Discard.cbDiscardLeft -= cbThisDiscard;
        pIoCtx->Req.Discard.offCur += cbThisDiscard;
        pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
        pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
        rc = VINF_SUCCESS;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3420
/**
 * Async discard helper - entry point.
 *
 * Processes one step of the discard state machine: loads the next range if
 * necessary, looks it up in the discard block tree and dispatches to the
 * matching continuation helper.
 *
 * @returns VBox status code.
 * @param   pIoCtx    The I/O context to operate on.
 */
static DECLCALLBACK(int) vdDiscardHelperAsync(PVDIOCTX pIoCtx)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;
    PCRTRANGE paRanges = pIoCtx->Req.Discard.paRanges;
    unsigned cRanges = pIoCtx->Req.Discard.cRanges;
    PVDDISCARDSTATE pDiscard = pDisk->pDiscard;

    LogFlowFunc(("pIoCtx=%#p\n", pIoCtx));

    /* Check if the I/O context processed all ranges. */
    if (   pIoCtx->Req.Discard.idxRange == cRanges
        && !pIoCtx->Req.Discard.cbDiscardLeft)
    {
        LogFlowFunc(("All ranges discarded, completing\n"));
        vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs*/);
        return VINF_SUCCESS;
    }

    /* Acquire the disk lock once; re-entries of the state machine own it already. */
    if (pDisk->pIoCtxLockOwner != pIoCtx)
        rc = vdIoCtxLockDisk(pDisk, pIoCtx);

    if (RT_SUCCESS(rc))
    {
        uint64_t offStart = pIoCtx->Req.Discard.offCur;
        size_t cbDiscardLeft = pIoCtx->Req.Discard.cbDiscardLeft;
        size_t cbThisDiscard;

        pDisk->uOffsetStartLocked = offStart;
        pDisk->uOffsetEndLocked = offStart + cbDiscardLeft;

        /* Create the discard state lazily on first use. */
        if (RT_UNLIKELY(!pDiscard))
        {
            pDiscard = vdDiscardStateCreate();
            if (!pDiscard)
                return VERR_NO_MEMORY; /* NOTE(review): returns while still holding the
                                        * disk lock acquired above - confirm intended. */

            pDisk->pDiscard = pDiscard;
        }

        /* Load the next range descriptor when the current one is exhausted. */
        if (!pIoCtx->Req.Discard.cbDiscardLeft)
        {
            offStart = paRanges[pIoCtx->Req.Discard.idxRange].offStart;
            cbDiscardLeft = paRanges[pIoCtx->Req.Discard.idxRange].cbRange;
            LogFlowFunc(("New range descriptor loaded (%u) offStart=%llu cbDiscard=%zu\n",
                         pIoCtx->Req.Discard.idxRange, offStart, cbDiscardLeft));
            pIoCtx->Req.Discard.idxRange++;
        }

        /* Look for a matching block in the AVL tree first. */
        PVDDISCARDBLOCK pBlock = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, false);
        if (!pBlock || pBlock->Core.KeyLast < offStart)
        {
            PVDDISCARDBLOCK pBlockAbove = (PVDDISCARDBLOCK)RTAvlrU64GetBestFit(pDiscard->pTreeBlocks, offStart, true);

            /* Clip range to remain in the current block. */
            if (pBlockAbove)
                cbThisDiscard = RT_MIN(cbDiscardLeft, pBlockAbove->Core.KeyLast - offStart + 1);
            else
                cbThisDiscard = cbDiscardLeft;

            Assert(!(cbThisDiscard % 512));
            pIoCtx->Req.Discard.pBlock = NULL;
            pIoCtx->pfnIoCtxTransferNext = vdDiscardCurrentRangeAsync;
        }
        else
        {
            /* Range lies partly in the block, update allocation bitmap. */
            int32_t idxStart, idxEnd;

            cbThisDiscard = RT_MIN(cbDiscardLeft, pBlock->Core.KeyLast - offStart + 1);

            AssertPtr(pBlock);

            Assert(!(cbThisDiscard % 512));
            Assert(!((offStart - pBlock->Core.Key) % 512));

            /* The allocation bitmap has one bit per 512 byte sector. */
            idxStart = (offStart - pBlock->Core.Key) / 512;
            idxEnd = idxStart + (int32_t)(cbThisDiscard / 512);

            ASMBitClearRange(pBlock->pbmAllocated, idxStart, idxEnd);

            cbDiscardLeft -= cbThisDiscard;
            offStart += cbThisDiscard;

            /* Call the backend to discard the block if it is completely unallocated now. */
            if (ASMBitFirstSet((volatile void *)pBlock->pbmAllocated, (uint32_t)(pBlock->cbDiscard / 512)) == -1)
            {
                pIoCtx->Req.Discard.pBlock = pBlock;
                pIoCtx->pfnIoCtxTransferNext = vdDiscardWholeBlockAsync;
                rc = VINF_SUCCESS;
            }
            else
            {
                /* Still partially allocated: move the block to the LRU head. */
                RTListNodeRemove(&pBlock->NodeLru);
                RTListPrepend(&pDiscard->ListLru, &pBlock->NodeLru);

                /* Start with next range. */
                pIoCtx->pfnIoCtxTransferNext = vdDiscardHelperAsync;
                rc = VINF_SUCCESS;
            }
        }

        /* Save state in the context. */
        pIoCtx->Req.Discard.offCur = offStart;
        pIoCtx->Req.Discard.cbDiscardLeft = cbDiscardLeft;
        pIoCtx->Req.Discard.cbThisDiscard = cbThisDiscard;
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
3539
3540#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3541
3542/**
3543 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterImage}
3544 */
3545static DECLCALLBACK(int) vdPluginRegisterImage(void *pvUser, PCVDIMAGEBACKEND pBackend)
3546{
3547 int rc = VINF_SUCCESS;
3548
3549 if (VD_VERSION_ARE_COMPATIBLE(VD_IMGBACKEND_VERSION, pBackend->u32Version))
3550 vdAddBackend((RTLDRMOD)pvUser, pBackend);
3551 else
3552 {
3553 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3554 rc = VERR_IGNORED;
3555 }
3556
3557 return rc;
3558}
3559
3560/**
3561 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterCache}
3562 */
3563static DECLCALLBACK(int) vdPluginRegisterCache(void *pvUser, PCVDCACHEBACKEND pBackend)
3564{
3565 int rc = VINF_SUCCESS;
3566
3567 if (VD_VERSION_ARE_COMPATIBLE(VD_CACHEBACKEND_VERSION, pBackend->u32Version))
3568 vdAddCacheBackend((RTLDRMOD)pvUser, pBackend);
3569 else
3570 {
3571 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3572 rc = VERR_IGNORED;
3573 }
3574
3575 return rc;
3576}
3577
3578/**
3579 * @interface_method_impl{VDBACKENDREGISTER,pfnRegisterFilter}
3580 */
3581static DECLCALLBACK(int) vdPluginRegisterFilter(void *pvUser, PCVDFILTERBACKEND pBackend)
3582{
3583 int rc = VINF_SUCCESS;
3584
3585 if (VD_VERSION_ARE_COMPATIBLE(VD_FLTBACKEND_VERSION, pBackend->u32Version))
3586 vdAddFilterBackend((RTLDRMOD)pvUser, pBackend);
3587 else
3588 {
3589 LogFunc(("ignored plugin: pBackend->u32Version=%u rc=%Rrc\n", pBackend->u32Version, rc));
3590 rc = VERR_IGNORED;
3591 }
3592
3593 return rc;
3594}
3595
3596/**
3597 * Checks whether the given plugin filename was already loaded.
3598 *
3599 * @returns true if the plugin was already loaded, false otherwise.
3600 * @param pszFilename The filename to check.
3601 */
3602static bool vdPluginFind(const char *pszFilename)
3603{
3604 PVDPLUGIN pIt = NULL;
3605
3606 RTListForEach(&g_ListPluginsLoaded, pIt, VDPLUGIN, NodePlugin)
3607 {
3608 if (!RTStrCmp(pIt->pszFilename, pszFilename))
3609 return true;
3610 }
3611
3612 return false;
3613}
3614
3615/**
3616 * Adds a plugin to the list of loaded plugins.
3617 *
3618 * @returns VBox status code.
3619 * @param hPlugin Plugin handle to add.
3620 * @param pszFilename The associated filename, used for finding duplicates.
3621 */
3622static int vdAddPlugin(RTLDRMOD hPlugin, const char *pszFilename)
3623{
3624 int rc = VINF_SUCCESS;
3625 PVDPLUGIN pPlugin = (PVDPLUGIN)RTMemAllocZ(sizeof(VDPLUGIN));
3626
3627 if (pPlugin)
3628 {
3629 pPlugin->hPlugin = hPlugin;
3630 pPlugin->pszFilename = RTStrDup(pszFilename);
3631 if (pPlugin->pszFilename)
3632 RTListAppend(&g_ListPluginsLoaded, &pPlugin->NodePlugin);
3633 else
3634 {
3635 RTMemFree(pPlugin);
3636 rc = VERR_NO_MEMORY;
3637 }
3638 }
3639 else
3640 rc = VERR_NO_MEMORY;
3641
3642 return rc;
3643}
3644
3645static int vdRemovePlugin(const char *pszFilename)
3646{
3647 /* Find plugin to be removed from the list. */
3648 PVDPLUGIN pIt = NULL;
3649 RTListForEach(&g_ListPluginsLoaded, pIt, VDPLUGIN, NodePlugin)
3650 {
3651 if (!RTStrCmp(pIt->pszFilename, pszFilename))
3652 break;
3653 }
3654 if (!pIt)
3655 return VINF_SUCCESS;
3656
3657 /** @todo r=klaus: need to add a plugin entry point for unregistering the
3658 * backends. Only if this doesn't exist (or fails to work) we should fall
3659 * back to the following uncoordinated backend cleanup. */
3660 for (unsigned i = 0; i < g_cBackends; i++)
3661 {
3662 while (i < g_cBackends && g_ahBackendPlugins[i] == pIt->hPlugin)
3663 {
3664 memcpy(&g_apBackends[i], &g_apBackends[i + 1], (g_cBackends - i - 1) * sizeof(PCVDIMAGEBACKEND));
3665 memcpy(&g_ahBackendPlugins[i], &g_ahBackendPlugins[i + 1], (g_cBackends - i - 1) * sizeof(RTLDRMOD));
3666 /** @todo for now skip reallocating, doesn't save much */
3667 g_cBackends--;
3668 }
3669 }
3670 for (unsigned i = 0; i < g_cCacheBackends; i++)
3671 {
3672 while (i < g_cCacheBackends && g_ahCacheBackendPlugins[i] == pIt->hPlugin)
3673 {
3674 memcpy(&g_apCacheBackends[i], &g_apCacheBackends[i + 1], (g_cCacheBackends - i - 1) * sizeof(PCVDCACHEBACKEND));
3675 memcpy(&g_ahCacheBackendPlugins[i], &g_ahCacheBackendPlugins[i + 1], (g_cCacheBackends - i - 1) * sizeof(RTLDRMOD));
3676 /** @todo for now skip reallocating, doesn't save much */
3677 g_cCacheBackends--;
3678 }
3679 }
3680 for (unsigned i = 0; i < g_cFilterBackends; i++)
3681 {
3682 while (i < g_cFilterBackends && g_pahFilterBackendPlugins[i] == pIt->hPlugin)
3683 {
3684 memcpy(&g_apFilterBackends[i], &g_apFilterBackends[i + 1], (g_cFilterBackends - i - 1) * sizeof(PCVDFILTERBACKEND));
3685 memcpy(&g_pahFilterBackendPlugins[i], &g_pahFilterBackendPlugins[i + 1], (g_cFilterBackends - i - 1) * sizeof(RTLDRMOD));
3686 /** @todo for now skip reallocating, doesn't save much */
3687 g_cFilterBackends--;
3688 }
3689 }
3690
3691 /* Remove the plugin node now, all traces of it are gone. */
3692 RTListNodeRemove(&pIt->NodePlugin);
3693 RTLdrClose(pIt->hPlugin);
3694 RTStrFree(pIt->pszFilename);
3695 RTMemFree(pIt);
3696
3697 return VINF_SUCCESS;
3698}
3699
3700#endif /* !VBOX_HDD_NO_DYNAMIC_BACKENDS */
3701
/**
 * Worker for VDPluginLoadFromFilename() and vdPluginLoadFromPath().
 *
 * Loads the shared library, resolves the VD plugin entry point and lets the
 * plugin register its image/cache/filter backends through the callback table.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED if dynamic backends are disabled at build time.
 * @param   pszFilename    The plugin filename to load.
 */
static int vdPluginLoadFromFilename(const char *pszFilename)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* Plugin loaded? Nothing to do. */
    if (vdPluginFind(pszFilename))
        return VINF_SUCCESS;

    /* Load the module through the support library loader. */
    RTLDRMOD hPlugin = NIL_RTLDRMOD;
    int rc = SUPR3HardenedLdrLoadPlugIn(pszFilename, &hPlugin, NULL);
    if (RT_SUCCESS(rc))
    {
        VDBACKENDREGISTER BackendRegister;
        PFNVDPLUGINLOAD pfnVDPluginLoad = NULL;

        /* Registration callback table handed to the plugin's load function. */
        BackendRegister.u32Version = VD_BACKENDREG_CB_VERSION;
        BackendRegister.pfnRegisterImage = vdPluginRegisterImage;
        BackendRegister.pfnRegisterCache = vdPluginRegisterCache;
        BackendRegister.pfnRegisterFilter = vdPluginRegisterFilter;

        rc = RTLdrGetSymbol(hPlugin, VD_PLUGIN_LOAD_NAME, (void**)&pfnVDPluginLoad);
        if (RT_FAILURE(rc) || !pfnVDPluginLoad)
        {
            LogFunc(("error resolving the entry point %s in plugin %s, rc=%Rrc, pfnVDPluginLoad=%#p\n",
                     VD_PLUGIN_LOAD_NAME, pszFilename, rc, pfnVDPluginLoad));
            if (RT_SUCCESS(rc))
                rc = VERR_SYMBOL_NOT_FOUND;
        }

        if (RT_SUCCESS(rc))
        {
            /* Get the function table. */
            rc = pfnVDPluginLoad(hPlugin, &BackendRegister);
        }
        else
            LogFunc(("ignored plugin '%s': rc=%Rrc\n", pszFilename, rc));

        /* Create a plugin entry on success. */
        if (RT_SUCCESS(rc))
            vdAddPlugin(hPlugin, pszFilename);
        else
            RTLdrClose(hPlugin);
    }

    return rc;
#else
    RT_NOREF1(pszFilename);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3757
/**
 * Worker for VDPluginLoadFromPath() and vdLoadDynamicBackends().
 *
 * Enumerates all directory entries matching the VD plugin prefix and tries to
 * load each regular file as a plugin.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED if dynamic backends are disabled at build time.
 * @param   pszPath    The path to load plugins from.
 */
static int vdPluginLoadFromPath(const char *pszPath)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* To get all entries with VBoxHDD as prefix. */
    char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
    if (!pszPluginFilter)
        return VERR_NO_STR_MEMORY;

    PRTDIRENTRYEX pPluginDirEntry = NULL;
    PRTDIR pPluginDir = NULL;
    size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
    int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
    if (RT_SUCCESS(rc))
    {
        pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
        if (pPluginDirEntry)
        {
            while (   (rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK))
                   != VERR_NO_MORE_FILES)
            {
                char *pszPluginPath = NULL;

                if (rc == VERR_BUFFER_OVERFLOW)
                {
                    /* allocate new buffer. */
                    RTMemFree(pPluginDirEntry);
                    pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
                    if (!pPluginDirEntry)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    /* Retry. */
                    rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
                    if (RT_FAILURE(rc))
                        break;
                }
                else if (RT_FAILURE(rc))
                    break;

                /* We got the new entry. Skip everything which is not a regular file. */
                if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
                    continue;

                /* Prepend the path to the libraries. */
                pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
                if (!pszPluginPath)
                {
                    rc = VERR_NO_STR_MEMORY;
                    break;
                }

                /* A failure here is overwritten by the next RTDirReadEx, so a
                 * single bad plugin does not abort the enumeration. */
                rc = vdPluginLoadFromFilename(pszPluginPath);
                RTStrFree(pszPluginPath);
            }

            RTMemFree(pPluginDirEntry);
        }
        else
            rc = VERR_NO_MEMORY;

        RTDirClose(pPluginDir);
    }
    else
    {
        /* On Windows the above immediately signals that there are no
         * files matching, while on other platforms enumerating the
         * files below fails. Either way: no plugins. */
    }

    if (rc == VERR_NO_MORE_FILES)
        rc = VINF_SUCCESS;
    RTStrFree(pszPluginFilter);
    return rc;
#else
    RT_NOREF1(pszPath);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3843
3844/**
3845 * internal: scans plugin directory and loads found plugins.
3846 */
3847static int vdLoadDynamicBackends(void)
3848{
3849#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
3850 /*
3851 * Enumerate plugin backends from the application directory where the other
3852 * shared libraries are.
3853 */
3854 char szPath[RTPATH_MAX];
3855 int rc = RTPathAppPrivateArch(szPath, sizeof(szPath));
3856 if (RT_FAILURE(rc))
3857 return rc;
3858
3859 return vdPluginLoadFromPath(szPath);
3860#else
3861 return VINF_SUCCESS;
3862#endif
3863}
3864
/**
 * Worker for VDPluginUnloadFromFilename() and vdPluginUnloadFromPath().
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED if dynamic backends are disabled at build time.
 * @param   pszFilename    The plugin filename to unload.
 */
static int vdPluginUnloadFromFilename(const char *pszFilename)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* The actual list and backend cleanup happens in vdRemovePlugin(). */
    return vdRemovePlugin(pszFilename);
#else
    RT_NOREF1(pszFilename);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3880
/**
 * Worker for VDPluginUnloadFromPath().
 *
 * Enumerates all directory entries matching the VD plugin prefix and unloads
 * each regular file which is registered as a loaded plugin.
 *
 * NOTE(review): this duplicates the enumeration loop of vdPluginLoadFromPath();
 * consider factoring out a shared helper.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED if dynamic backends are disabled at build time.
 * @param   pszPath    The path to unload plugins from.
 */
static int vdPluginUnloadFromPath(const char *pszPath)
{
#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
    /* To get all entries with VBoxHDD as prefix. */
    char *pszPluginFilter = RTPathJoinA(pszPath, VD_PLUGIN_PREFIX "*");
    if (!pszPluginFilter)
        return VERR_NO_STR_MEMORY;

    PRTDIRENTRYEX pPluginDirEntry = NULL;
    PRTDIR pPluginDir = NULL;
    size_t cbPluginDirEntry = sizeof(RTDIRENTRYEX);
    int rc = RTDirOpenFiltered(&pPluginDir, pszPluginFilter, RTDIRFILTER_WINNT, 0);
    if (RT_SUCCESS(rc))
    {
        pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(sizeof(RTDIRENTRYEX));
        if (pPluginDirEntry)
        {
            while ((rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK)) != VERR_NO_MORE_FILES)
            {
                char *pszPluginPath = NULL;

                if (rc == VERR_BUFFER_OVERFLOW)
                {
                    /* allocate new buffer. */
                    RTMemFree(pPluginDirEntry);
                    pPluginDirEntry = (PRTDIRENTRYEX)RTMemAllocZ(cbPluginDirEntry);
                    if (!pPluginDirEntry)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    /* Retry. */
                    rc = RTDirReadEx(pPluginDir, pPluginDirEntry, &cbPluginDirEntry, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
                    if (RT_FAILURE(rc))
                        break;
                }
                else if (RT_FAILURE(rc))
                    break;

                /* We got the new entry. Skip everything which is not a regular file. */
                if (!RTFS_IS_FILE(pPluginDirEntry->Info.Attr.fMode))
                    continue;

                /* Prepend the path to the libraries. */
                pszPluginPath = RTPathJoinA(pszPath, pPluginDirEntry->szName);
                if (!pszPluginPath)
                {
                    rc = VERR_NO_STR_MEMORY;
                    break;
                }

                rc = vdPluginUnloadFromFilename(pszPluginPath);
                RTStrFree(pszPluginPath);
            }

            RTMemFree(pPluginDirEntry);
        }
        else
            rc = VERR_NO_MEMORY;

        RTDirClose(pPluginDir);
    }
    else
    {
        /* On Windows the above immediately signals that there are no
         * files matching, while on other platforms enumerating the
         * files below fails. Either way: no plugins. */
    }

    if (rc == VERR_NO_MORE_FILES)
        rc = VINF_SUCCESS;
    RTStrFree(pszPluginFilter);
    return rc;
#else
    RT_NOREF1(pszPath);
    return VERR_NOT_IMPLEMENTED;
#endif
}
3965
3966/**
3967 * VD async I/O interface open callback.
3968 */
3969static DECLCALLBACK(int) vdIOOpenFallback(void *pvUser, const char *pszLocation,
3970 uint32_t fOpen, PFNVDCOMPLETED pfnCompleted,
3971 void **ppStorage)
3972{
3973 RT_NOREF1(pvUser);
3974 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)RTMemAllocZ(sizeof(VDIIOFALLBACKSTORAGE));
3975
3976 if (!pStorage)
3977 return VERR_NO_MEMORY;
3978
3979 pStorage->pfnCompleted = pfnCompleted;
3980
3981 /* Open the file. */
3982 int rc = RTFileOpen(&pStorage->File, pszLocation, fOpen);
3983 if (RT_SUCCESS(rc))
3984 {
3985 *ppStorage = pStorage;
3986 return VINF_SUCCESS;
3987 }
3988
3989 RTMemFree(pStorage);
3990 return rc;
3991}
3992
3993/**
3994 * VD async I/O interface close callback.
3995 */
3996static DECLCALLBACK(int) vdIOCloseFallback(void *pvUser, void *pvStorage)
3997{
3998 RT_NOREF1(pvUser);
3999 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4000
4001 RTFileClose(pStorage->File);
4002 RTMemFree(pStorage);
4003 return VINF_SUCCESS;
4004}
4005
4006static DECLCALLBACK(int) vdIODeleteFallback(void *pvUser, const char *pcszFilename)
4007{
4008 RT_NOREF1(pvUser);
4009 return RTFileDelete(pcszFilename);
4010}
4011
4012static DECLCALLBACK(int) vdIOMoveFallback(void *pvUser, const char *pcszSrc, const char *pcszDst, unsigned fMove)
4013{
4014 RT_NOREF1(pvUser);
4015 return RTFileMove(pcszSrc, pcszDst, fMove);
4016}
4017
4018static DECLCALLBACK(int) vdIOGetFreeSpaceFallback(void *pvUser, const char *pcszFilename, int64_t *pcbFreeSpace)
4019{
4020 RT_NOREF1(pvUser);
4021 return RTFsQuerySizes(pcszFilename, NULL, pcbFreeSpace, NULL, NULL);
4022}
4023
4024static DECLCALLBACK(int) vdIOGetModificationTimeFallback(void *pvUser, const char *pcszFilename, PRTTIMESPEC pModificationTime)
4025{
4026 RT_NOREF1(pvUser);
4027 RTFSOBJINFO info;
4028 int rc = RTPathQueryInfo(pcszFilename, &info, RTFSOBJATTRADD_NOTHING);
4029 if (RT_SUCCESS(rc))
4030 *pModificationTime = info.ModificationTime;
4031 return rc;
4032}
4033
4034/**
4035 * VD async I/O interface callback for retrieving the file size.
4036 */
4037static DECLCALLBACK(int) vdIOGetSizeFallback(void *pvUser, void *pvStorage, uint64_t *pcbSize)
4038{
4039 RT_NOREF1(pvUser);
4040 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4041
4042 return RTFileGetSize(pStorage->File, pcbSize);
4043}
4044
4045/**
4046 * VD async I/O interface callback for setting the file size.
4047 */
4048static DECLCALLBACK(int) vdIOSetSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize)
4049{
4050 RT_NOREF1(pvUser);
4051 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4052
4053 return RTFileSetSize(pStorage->File, cbSize);
4054}
4055
4056/**
4057 * VD async I/O interface callback for setting the file allocation size.
4058 */
4059static DECLCALLBACK(int) vdIOSetAllocationSizeFallback(void *pvUser, void *pvStorage, uint64_t cbSize,
4060 uint32_t fFlags)
4061{
4062 RT_NOREF2(pvUser, fFlags);
4063 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4064
4065 return RTFileSetAllocationSize(pStorage->File, cbSize, RTFILE_ALLOC_SIZE_F_DEFAULT);
4066}
4067
4068/**
4069 * VD async I/O interface callback for a synchronous write to the file.
4070 */
4071static DECLCALLBACK(int) vdIOWriteSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4072 const void *pvBuf, size_t cbWrite, size_t *pcbWritten)
4073{
4074 RT_NOREF1(pvUser);
4075 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4076
4077 return RTFileWriteAt(pStorage->File, uOffset, pvBuf, cbWrite, pcbWritten);
4078}
4079
4080/**
4081 * VD async I/O interface callback for a synchronous read from the file.
4082 */
4083static DECLCALLBACK(int) vdIOReadSyncFallback(void *pvUser, void *pvStorage, uint64_t uOffset,
4084 void *pvBuf, size_t cbRead, size_t *pcbRead)
4085{
4086 RT_NOREF1(pvUser);
4087 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4088
4089 return RTFileReadAt(pStorage->File, uOffset, pvBuf, cbRead, pcbRead);
4090}
4091
4092/**
4093 * VD async I/O interface callback for a synchronous flush of the file data.
4094 */
4095static DECLCALLBACK(int) vdIOFlushSyncFallback(void *pvUser, void *pvStorage)
4096{
4097 RT_NOREF1(pvUser);
4098 PVDIIOFALLBACKSTORAGE pStorage = (PVDIIOFALLBACKSTORAGE)pvStorage;
4099
4100 return RTFileFlush(pStorage->File);
4101}
4102
4103/**
4104 * VD async I/O interface callback for a asynchronous read from the file.
4105 */
4106static DECLCALLBACK(int) vdIOReadAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4107 PCRTSGSEG paSegments, size_t cSegments,
4108 size_t cbRead, void *pvCompletion,
4109 void **ppTask)
4110{
4111 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbRead, pvCompletion, ppTask);
4112 return VERR_NOT_IMPLEMENTED;
4113}
4114
4115/**
4116 * VD async I/O interface callback for a asynchronous write to the file.
4117 */
4118static DECLCALLBACK(int) vdIOWriteAsyncFallback(void *pvUser, void *pStorage, uint64_t uOffset,
4119 PCRTSGSEG paSegments, size_t cSegments,
4120 size_t cbWrite, void *pvCompletion,
4121 void **ppTask)
4122{
4123 RT_NOREF8(pvUser, pStorage, uOffset, paSegments, cSegments, cbWrite, pvCompletion, ppTask);
4124 return VERR_NOT_IMPLEMENTED;
4125}
4126
4127/**
4128 * VD async I/O interface callback for a asynchronous flush of the file data.
4129 */
4130static DECLCALLBACK(int) vdIOFlushAsyncFallback(void *pvUser, void *pStorage,
4131 void *pvCompletion, void **ppTask)
4132{
4133 RT_NOREF4(pvUser, pStorage, pvCompletion, ppTask);
4134 return VERR_NOT_IMPLEMENTED;
4135}
4136
/**
 * Internal - Continues an I/O context after
 * it was halted because of an active transfer.
 *
 * Caller must hold the disk lock (asserted via VD_IS_LOCKED).
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pIoCtx  The I/O context to continue.
 * @param   rcReq   Status of the transfer that just completed; a failure is
 *                  folded into the context status (first error wins).
 */
static int vdIoCtxContinue(PVDIOCTX pIoCtx, int rcReq)
{
    PVBOXHDD pDisk = pIoCtx->pDisk;
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /* Record the first failure only; an already set error is not overwritten. */
    if (RT_FAILURE(rcReq))
        ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);

    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_BLOCKED))
    {
        /* Continue the transfer */
        rc = vdIoCtxProcessLocked(pIoCtx);

        /* The CmpXchg on fComplete ensures completion is handled exactly once. */
        if (   rc == VINF_VD_ASYNC_IO_FINISHED
            && ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
        {
            LogFlowFunc(("I/O context completed pIoCtx=%#p\n", pIoCtx));
            if (pIoCtx->pIoCtxParent)
            {
                PVDIOCTX pIoCtxParent = pIoCtx->pIoCtxParent;

                /* Only one level of context nesting is supported. */
                Assert(!pIoCtxParent->pIoCtxParent);
                if (RT_FAILURE(pIoCtx->rcReq))
                    ASMAtomicCmpXchgS32(&pIoCtxParent->rcReq, pIoCtx->rcReq, VINF_SUCCESS);

                ASMAtomicDecU32(&pIoCtxParent->cDataTransfersPending);

                if (pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE)
                {
                    LogFlowFunc(("I/O context transferred %u bytes for the parent pIoCtxParent=%p\n",
                                 pIoCtx->Type.Child.cbTransferParent, pIoCtxParent));

                    /* Update the parent state. */
                    Assert(pIoCtxParent->Req.Io.cbTransferLeft >= pIoCtx->Type.Child.cbTransferParent);
                    ASMAtomicSubU32(&pIoCtxParent->Req.Io.cbTransferLeft, (uint32_t)pIoCtx->Type.Child.cbTransferParent);
                }
                else
                    Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH);

                /*
                 * A completed child write means that we finished growing the image.
                 * We have to process any pending writes now.
                 */
                vdIoCtxUnlockDisk(pDisk, pIoCtxParent, false /* fProcessDeferredReqs */);

                /* Unblock the parent */
                pIoCtxParent->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;

                rc = vdIoCtxProcessLocked(pIoCtxParent);

                if (   rc == VINF_VD_ASYNC_IO_FINISHED
                    && ASMAtomicCmpXchgBool(&pIoCtxParent->fComplete, true, false))
                {
                    LogFlowFunc(("Parent I/O context completed pIoCtxParent=%#p rcReq=%Rrc\n", pIoCtxParent, pIoCtxParent->rcReq));
                    vdIoCtxRootComplete(pDisk, pIoCtxParent);
                    vdThreadFinishWrite(pDisk);
                    vdIoCtxFree(pDisk, pIoCtxParent);
                    vdDiskProcessBlockedIoCtx(pDisk);
                }
                else if (!vdIoCtxIsDiskLockOwner(pDisk, pIoCtx))
                {
                    /* Process any pending writes if the current request didn't cause another grow operation. */
                    vdDiskProcessBlockedIoCtx(pDisk);
                }
            }
            else
            {
                /* Root context: release thread bookkeeping according to the transfer direction. */
                if (pIoCtx->enmTxDir == VDIOCTXTXDIR_FLUSH)
                {
                    vdIoCtxUnlockDisk(pDisk, pIoCtx, true /* fProcessDeferredReqs */);
                    vdThreadFinishWrite(pDisk);
                }
                else if (   pIoCtx->enmTxDir == VDIOCTXTXDIR_WRITE
                         || pIoCtx->enmTxDir == VDIOCTXTXDIR_DISCARD)
                    vdThreadFinishWrite(pDisk);
                else
                {
                    Assert(pIoCtx->enmTxDir == VDIOCTXTXDIR_READ);
                    vdThreadFinishRead(pDisk);
                }

                LogFlowFunc(("I/O context completed pIoCtx=%#p rcReq=%Rrc\n", pIoCtx, pIoCtx->rcReq));
                vdIoCtxRootComplete(pDisk, pIoCtx);
            }

            vdIoCtxFree(pDisk, pIoCtx);
        }
    }

    return VINF_SUCCESS;
}
4234
/**
 * Internal - Called when user transfer completed.
 *
 * Accounts the transferred bytes in the I/O context, invokes the optional
 * backend completion callback and continues the context.  Caller must hold
 * the disk lock.
 *
 * @returns VBox status code.
 * @param   pIoStorage   Storage the transfer was performed on.
 * @param   pIoCtx       The I/O context the transfer belongs to.
 * @param   pfnComplete  Optional backend completion callback.
 * @param   pvUser       Opaque user argument for pfnComplete.
 * @param   cbTransfer   Number of bytes transferred (must fit in 32 bits).
 * @param   rcReq        Status of the completed request.
 */
static int vdUserXferCompleted(PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
                               PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
                               size_t cbTransfer, int rcReq)
{
    int rc = VINF_SUCCESS;
    PVBOXHDD pDisk = pIoCtx->pDisk;

    LogFlowFunc(("pIoStorage=%#p pIoCtx=%#p pfnComplete=%#p pvUser=%#p cbTransfer=%zu rcReq=%Rrc\n",
                 pIoStorage, pIoCtx, pfnComplete, pvUser, cbTransfer, rcReq));

    VD_IS_LOCKED(pDisk);

    /* Subtract the completed amount; the trailing assert guards against truncation. */
    Assert(pIoCtx->Req.Io.cbTransferLeft >= cbTransfer);
    ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTransfer); Assert(cbTransfer == (uint32_t)cbTransfer);
    ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);

    if (pfnComplete)
        rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);

    /* VERR_VD_ASYNC_IO_IN_PROGRESS means the backend kicked off more work; not an error. */
    if (RT_SUCCESS(rc))
        rc = vdIoCtxContinue(pIoCtx, rcReq);
    else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        rc = VINF_SUCCESS;

    return rc;
}
4264
/**
 * Internal - Continues all I/O contexts parked on a deferred wait list.
 *
 * Pops each deferred entry, frees it, invokes the optional backend
 * completion callback and continues the associated I/O context.
 *
 * @returns nothing.
 * @param   pIoStorage   Storage the original transfer was performed on.
 * @param   pListWaiting List of deferred I/O contexts; emptied on return.
 * @param   pfnComplete  Optional backend completion callback.
 * @param   pvUser       Opaque user argument for pfnComplete.
 * @param   rcReq        Status of the completed request, passed to each context.
 */
static void vdIoCtxContinueDeferredList(PVDIOSTORAGE pIoStorage, PRTLISTANCHOR pListWaiting,
                                        PFNVDXFERCOMPLETED pfnComplete, void *pvUser, int rcReq)
{
    LogFlowFunc(("pIoStorage=%#p pListWaiting=%#p pfnComplete=%#p pvUser=%#p rcReq=%Rrc\n",
                 pIoStorage, pListWaiting, pfnComplete, pvUser, rcReq));

    /* Go through the waiting list and continue the I/O contexts. */
    while (!RTListIsEmpty(pListWaiting))
    {
        int rc = VINF_SUCCESS;
        PVDIOCTXDEFERRED pDeferred = RTListGetFirst(pListWaiting, VDIOCTXDEFERRED, NodeDeferred);
        PVDIOCTX pIoCtx = pDeferred->pIoCtx;
        RTListNodeRemove(&pDeferred->NodeDeferred);

        /* The deferred node is only a parking slot; free it before continuing the context. */
        RTMemFree(pDeferred);
        ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);

        if (pfnComplete)
            rc = pfnComplete(pIoStorage->pVDIo->pBackendData, pIoCtx, pvUser, rcReq);

        LogFlow(("Completion callback for I/O context %#p returned %Rrc\n", pIoCtx, rc));

        if (RT_SUCCESS(rc))
        {
            rc = vdIoCtxContinue(pIoCtx, rcReq);
            AssertRC(rc);
        }
        else
            Assert(rc == VERR_VD_ASYNC_IO_IN_PROGRESS);
    }
}
4296
/**
 * Internal - Called when a meta transfer completed.
 *
 * Wakes up all I/O contexts waiting on the meta transfer, handles a failed
 * write's shadow buffer, and retriggers a write when updated shadow data
 * accumulated while the previous write was in flight.  Caller must hold the
 * disk lock.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pIoStorage   Storage the meta transfer was performed on.
 * @param   pfnComplete  Optional backend completion callback.
 * @param   pvUser       Opaque user argument for pfnComplete.
 * @param   pMetaXfer    The meta transfer that completed; may be freed here.
 * @param   rcReq        Status of the completed request.
 */
static int vdMetaXferCompleted(PVDIOSTORAGE pIoStorage, PFNVDXFERCOMPLETED pfnComplete, void *pvUser,
                               PVDMETAXFER pMetaXfer, int rcReq)
{
    PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;
    RTLISTNODE ListIoCtxWaiting;
    bool fFlush;

    LogFlowFunc(("pIoStorage=%#p pfnComplete=%#p pvUser=%#p pMetaXfer=%#p rcReq=%Rrc\n",
                 pIoStorage, pfnComplete, pvUser, pMetaXfer, rcReq));

    VD_IS_LOCKED(pDisk);

    fFlush = VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_FLUSH;

    if (!fFlush)
    {
        /* Take over the waiter list so entries added later are not lost. */
        RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);

        if (RT_FAILURE(rcReq))
        {
            /* Remove from the AVL tree. */
            LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
            bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
            Assert(fRemoved); NOREF(fRemoved);
            /* If this was a write check if there is a shadow buffer with updated data. */
            if (pMetaXfer->pbDataShw)
            {
                Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
                Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));
                /* Fail the shadow writers together with the primary waiters. */
                RTListConcatenate(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
                RTMemFree(pMetaXfer->pbDataShw);
                pMetaXfer->pbDataShw = NULL;
            }
            RTMemFree(pMetaXfer);
        }
        else
        {
            /* Increase the reference counter to make sure it doesn't go away before the last context is processed. */
            pMetaXfer->cRefs++;
        }
    }
    else
        RTListMove(&ListIoCtxWaiting, &pMetaXfer->ListIoCtxWaiting);

    VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
    vdIoCtxContinueDeferredList(pIoStorage, &ListIoCtxWaiting, pfnComplete, pvUser, rcReq);

    /*
     * If there is a shadow buffer and the previous write was successful update with the
     * new data and trigger a new write.
     */
    if (   pMetaXfer->pbDataShw
        && RT_SUCCESS(rcReq)
        && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
    {
        LogFlowFunc(("pMetaXfer=%#p Updating from shadow buffer and triggering new write\n", pMetaXfer));
        memcpy(pMetaXfer->abData, pMetaXfer->pbDataShw, pMetaXfer->cbMeta);
        RTMemFree(pMetaXfer->pbDataShw);
        pMetaXfer->pbDataShw = NULL;
        Assert(!RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites));

        /* Setup a new I/O write. */
        PVDIOTASK pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
        if (RT_LIKELY(pIoTask))
        {
            void *pvTask = NULL;
            RTSGSEG Seg;

            Seg.cbSeg = pMetaXfer->cbMeta;
            Seg.pvSeg = pMetaXfer->abData;

            VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
            rcReq = pIoStorage->pVDIo->pInterfaceIo->pfnWriteAsync(pIoStorage->pVDIo->pInterfaceIo->Core.pvUser,
                                                                   pIoStorage->pStorage,
                                                                   pMetaXfer->Core.Key, &Seg, 1,
                                                                   pMetaXfer->cbMeta, pIoTask,
                                                                   &pvTask);
            /* Completed synchronously (success or hard error) - no task needed. */
            if (   RT_SUCCESS(rcReq)
                || rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
                vdIoTaskFree(pDisk, pIoTask);
            }
            else
                /* Write is in flight: shadow writers become the new primary waiters. */
                RTListMove(&pMetaXfer->ListIoCtxWaiting, &pMetaXfer->ListIoCtxShwWrites);
        }
        else
            rcReq = VERR_NO_MEMORY;

        /* Cleanup if there was an error or the request completed already. */
        if (rcReq != VERR_VD_ASYNC_IO_IN_PROGRESS)
            vdIoCtxContinueDeferredList(pIoStorage, &pMetaXfer->ListIoCtxShwWrites, pfnComplete, pvUser, rcReq);
    }

    /* Remove if not used anymore. */
    if (!fFlush)
    {
        /* Drop the reference taken above; free once no context waits on it. */
        pMetaXfer->cRefs--;
        if (!pMetaXfer->cRefs && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting))
        {
            /* Remove from the AVL tree. */
            LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
            bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
            Assert(fRemoved); NOREF(fRemoved);
            RTMemFree(pMetaXfer);
        }
    }
    else if (fFlush)
        /* Flush transfers are never inserted into the tree; just free. */
        RTMemFree(pMetaXfer);

    return VINF_SUCCESS;
}
4412
/**
 * Processes a list of waiting I/O tasks. The disk lock must be held by caller.
 *
 * Atomically detaches the pending task list (tasks are pushed LIFO by
 * completion callbacks), reverses it to restore FIFO order and completes
 * each task.
 *
 * @returns nothing.
 * @param   pDisk    The disk to process the list for.
 */
static void vdIoTaskProcessWaitingList(PVBOXHDD pDisk)
{
    LogFlowFunc(("pDisk=%#p\n", pDisk));

    VD_IS_LOCKED(pDisk);

    /* Detach the whole list in one atomic step; producers push to the head. */
    PVDIOTASK pHead = ASMAtomicXchgPtrT(&pDisk->pIoTasksPendingHead, NULL, PVDIOTASK);

    Log(("I/O task list cleared\n"));

    /* Reverse order. */
    PVDIOTASK pCur = pHead;
    pHead = NULL;
    while (pCur)
    {
        PVDIOTASK pInsert = pCur;
        pCur = pCur->pNext;
        pInsert->pNext = pHead;
        pHead = pInsert;
    }

    while (pHead)
    {
        PVDIOSTORAGE pIoStorage = pHead->pIoStorage;

        /* Dispatch to the user-data or metadata completion path. */
        if (!pHead->fMeta)
            vdUserXferCompleted(pIoStorage, pHead->Type.User.pIoCtx,
                                pHead->pfnComplete, pHead->pvUser,
                                pHead->Type.User.cbTransfer, pHead->rcReq);
        else
            vdMetaXferCompleted(pIoStorage, pHead->pfnComplete, pHead->pvUser,
                                pHead->Type.Meta.pMetaXfer, pHead->rcReq);

        /* Advance before freeing the current task. */
        pCur = pHead;
        pHead = pHead->pNext;
        vdIoTaskFree(pDisk, pCur);
    }
}
4457
/**
 * Process any I/O context on the halted list.
 *
 * Atomically detaches the halted-context list (pushed LIFO), reverses it to
 * FIFO order, clears the blocked flag on each context and continues it.
 * The disk lock must be held by the caller.
 *
 * @returns nothing.
 * @param   pDisk    The disk.
 */
static void vdIoCtxProcessHaltedList(PVBOXHDD pDisk)
{
    LogFlowFunc(("pDisk=%#p\n", pDisk));

    VD_IS_LOCKED(pDisk);

    /* Get the waiting list and process it in FIFO order. */
    PVDIOCTX pIoCtxHead = ASMAtomicXchgPtrT(&pDisk->pIoCtxHaltedHead, NULL, PVDIOCTX);

    /* Reverse it. */
    PVDIOCTX pCur = pIoCtxHead;
    pIoCtxHead = NULL;
    while (pCur)
    {
        PVDIOCTX pInsert = pCur;
        pCur = pCur->pIoCtxNext;
        pInsert->pIoCtxNext = pIoCtxHead;
        pIoCtxHead = pInsert;
    }

    /* Process now. */
    pCur = pIoCtxHead;
    while (pCur)
    {
        PVDIOCTX pTmp = pCur;

        /* Unlink before continuing; vdIoCtxContinue may free the context. */
        pCur = pCur->pIoCtxNext;
        pTmp->pIoCtxNext = NULL;

        /* Continue */
        pTmp->fFlags &= ~VDIOCTX_FLAGS_BLOCKED;
        vdIoCtxContinue(pTmp, pTmp->rcReq);
    }
}
4498
/**
 * Unlock the disk and process pending tasks.
 *
 * Drains the pending task, halted context and waiting context lists while
 * the lock is held, then releases the lock.  Re-acquires and drains again
 * if other threads queued new work in the meantime.
 *
 * @returns VBox status code.
 * @param   pDisk    The disk to unlock.
 * @param   pIoCtxRc The I/O context to get the status code from, optional.
 */
static int vdDiskUnlock(PVBOXHDD pDisk, PVDIOCTX pIoCtxRc)
{
    int rc = VINF_SUCCESS;

    VD_IS_LOCKED(pDisk);

    /*
     * Process the list of waiting I/O tasks first
     * because they might complete I/O contexts.
     * Same for the list of halted I/O contexts.
     * Afterwards comes the list of new I/O contexts.
     */
    vdIoTaskProcessWaitingList(pDisk);
    vdIoCtxProcessHaltedList(pDisk);
    rc = vdDiskProcessWaitingIoCtx(pDisk, pIoCtxRc);
    ASMAtomicXchgBool(&pDisk->fLocked, false);

    /*
     * Need to check for new I/O tasks and waiting I/O contexts now
     * again as other threads might added them while we processed
     * previous lists.
     */
    while (   ASMAtomicUoReadPtrT(&pDisk->pIoCtxHead, PVDIOCTX) != NULL
           || ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK) != NULL
           || ASMAtomicUoReadPtrT(&pDisk->pIoCtxHaltedHead, PVDIOCTX) != NULL)
    {
        /* Try lock disk again. */
        if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
        {
            vdIoTaskProcessWaitingList(pDisk);
            vdIoCtxProcessHaltedList(pDisk);
            vdDiskProcessWaitingIoCtx(pDisk, NULL);
            ASMAtomicXchgBool(&pDisk->fLocked, false);
        }
        else /* Let the other thread process everything when it unlocks the disk. */
            break;
    }

    return rc;
}
4546
/**
 * Try to lock the disk to complete processing of the I/O task.
 * The completion is deferred if the disk is locked already.
 *
 * The task is first pushed onto the lock-free pending list; if the disk
 * lock can be taken, the list is drained immediately via vdDiskUnlock.
 *
 * @returns nothing.
 * @param   pIoTask    The I/O task to complete.
 */
static void vdXferTryLockDiskDeferIoTask(PVDIOTASK pIoTask)
{
    PVDIOSTORAGE pIoStorage = pIoTask->pIoStorage;
    PVBOXHDD pDisk = pIoStorage->pVDIo->pDisk;

    Log(("Deferring I/O task pIoTask=%p\n", pIoTask));

    /* Put it on the waiting list. */
    PVDIOTASK pNext = ASMAtomicUoReadPtrT(&pDisk->pIoTasksPendingHead, PVDIOTASK);
    PVDIOTASK pHeadOld;
    pIoTask->pNext = pNext;
    /* Lock-free push: retry with the observed head until the CAS succeeds. */
    while (!ASMAtomicCmpXchgExPtr(&pDisk->pIoTasksPendingHead, pIoTask, pNext, &pHeadOld))
    {
        pNext = pHeadOld;
        Assert(pNext != pIoTask);
        pIoTask->pNext = pNext;
        ASMNopPause();
    }

    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Release disk lock, it will take care of processing all lists. */
        vdDiskUnlock(pDisk, NULL);
    }
}
4579
4580static DECLCALLBACK(int) vdIOIntReqCompleted(void *pvUser, int rcReq)
4581{
4582 PVDIOTASK pIoTask = (PVDIOTASK)pvUser;
4583
4584 LogFlowFunc(("Task completed pIoTask=%#p\n", pIoTask));
4585
4586 pIoTask->rcReq = rcReq;
4587 vdXferTryLockDiskDeferIoTask(pIoTask);
4588 return VINF_SUCCESS;
4589}
4590
4591/**
4592 * VD I/O interface callback for opening a file.
4593 */
4594static DECLCALLBACK(int) vdIOIntOpen(void *pvUser, const char *pszLocation,
4595 unsigned uOpenFlags, PPVDIOSTORAGE ppIoStorage)
4596{
4597 int rc = VINF_SUCCESS;
4598 PVDIO pVDIo = (PVDIO)pvUser;
4599 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
4600
4601 if (!pIoStorage)
4602 return VERR_NO_MEMORY;
4603
4604 /* Create the AVl tree. */
4605 pIoStorage->pTreeMetaXfers = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
4606 if (pIoStorage->pTreeMetaXfers)
4607 {
4608 rc = pVDIo->pInterfaceIo->pfnOpen(pVDIo->pInterfaceIo->Core.pvUser,
4609 pszLocation, uOpenFlags,
4610 vdIOIntReqCompleted,
4611 &pIoStorage->pStorage);
4612 if (RT_SUCCESS(rc))
4613 {
4614 pIoStorage->pVDIo = pVDIo;
4615 *ppIoStorage = pIoStorage;
4616 return VINF_SUCCESS;
4617 }
4618
4619 RTMemFree(pIoStorage->pTreeMetaXfers);
4620 }
4621 else
4622 rc = VERR_NO_MEMORY;
4623
4624 RTMemFree(pIoStorage);
4625 return rc;
4626}
4627
4628static DECLCALLBACK(int) vdIOIntTreeMetaXferDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
4629{
4630 RT_NOREF2(pNode, pvUser);
4631 AssertMsgFailed(("Tree should be empty at this point!\n"));
4632 return VINF_SUCCESS;
4633}
4634
4635static DECLCALLBACK(int) vdIOIntClose(void *pvUser, PVDIOSTORAGE pIoStorage)
4636{
4637 int rc = VINF_SUCCESS;
4638 PVDIO pVDIo = (PVDIO)pvUser;
4639
4640 /* We free everything here, even if closing the file failed for some reason. */
4641 rc = pVDIo->pInterfaceIo->pfnClose(pVDIo->pInterfaceIo->Core.pvUser, pIoStorage->pStorage);
4642 RTAvlrFileOffsetDestroy(pIoStorage->pTreeMetaXfers, vdIOIntTreeMetaXferDestroy, NULL);
4643 RTMemFree(pIoStorage->pTreeMetaXfers);
4644 RTMemFree(pIoStorage);
4645 return rc;
4646}
4647
4648static DECLCALLBACK(int) vdIOIntDelete(void *pvUser, const char *pcszFilename)
4649{
4650 PVDIO pVDIo = (PVDIO)pvUser;
4651 return pVDIo->pInterfaceIo->pfnDelete(pVDIo->pInterfaceIo->Core.pvUser,
4652 pcszFilename);
4653}
4654
4655static DECLCALLBACK(int) vdIOIntMove(void *pvUser, const char *pcszSrc, const char *pcszDst,
4656 unsigned fMove)
4657{
4658 PVDIO pVDIo = (PVDIO)pvUser;
4659 return pVDIo->pInterfaceIo->pfnMove(pVDIo->pInterfaceIo->Core.pvUser,
4660 pcszSrc, pcszDst, fMove);
4661}
4662
4663static DECLCALLBACK(int) vdIOIntGetFreeSpace(void *pvUser, const char *pcszFilename,
4664 int64_t *pcbFreeSpace)
4665{
4666 PVDIO pVDIo = (PVDIO)pvUser;
4667 return pVDIo->pInterfaceIo->pfnGetFreeSpace(pVDIo->pInterfaceIo->Core.pvUser,
4668 pcszFilename, pcbFreeSpace);
4669}
4670
4671static DECLCALLBACK(int) vdIOIntGetModificationTime(void *pvUser, const char *pcszFilename,
4672 PRTTIMESPEC pModificationTime)
4673{
4674 PVDIO pVDIo = (PVDIO)pvUser;
4675 return pVDIo->pInterfaceIo->pfnGetModificationTime(pVDIo->pInterfaceIo->Core.pvUser,
4676 pcszFilename, pModificationTime);
4677}
4678
4679static DECLCALLBACK(int) vdIOIntGetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4680 uint64_t *pcbSize)
4681{
4682 PVDIO pVDIo = (PVDIO)pvUser;
4683 return pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
4684 pIoStorage->pStorage, pcbSize);
4685}
4686
4687static DECLCALLBACK(int) vdIOIntSetSize(void *pvUser, PVDIOSTORAGE pIoStorage,
4688 uint64_t cbSize)
4689{
4690 PVDIO pVDIo = (PVDIO)pvUser;
4691 return pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
4692 pIoStorage->pStorage, cbSize);
4693}
4694
/**
 * VD I/O interface callback for setting the allocated size of a file.
 *
 * Tries the provider's native allocation first; if unsupported, emulates
 * growing by writing zero-filled chunks up to the requested size (with
 * progress reporting), or shrinks via a plain set-size call.
 *
 * @returns VBox status code.
 * @param   pvUser         Pointer to the VDIO instance.
 * @param   pIoStorage     Storage to resize.
 * @param   cbSize         Requested allocated size in bytes.
 * @param   fFlags         Allocation flags passed through to the provider.
 * @param   pIfProgress    Optional progress interface.
 * @param   uPercentStart  Progress range start.
 * @param   uPercentSpan   Progress range span.
 */
static DECLCALLBACK(int) vdIOIntSetAllocationSize(void *pvUser, PVDIOSTORAGE pIoStorage,
                                                  uint64_t cbSize, uint32_t fFlags,
                                                  PVDINTERFACEPROGRESS pIfProgress,
                                                  unsigned uPercentStart, unsigned uPercentSpan)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    int rc = pVDIo->pInterfaceIo->pfnSetAllocationSize(pVDIo->pInterfaceIo->Core.pvUser,
                                                       pIoStorage->pStorage, cbSize, fFlags);
    if (rc == VERR_NOT_SUPPORTED)
    {
        /* Fallback if the underlying medium does not support optimized storage allocation. */
        uint64_t cbSizeCur = 0;
        rc = pVDIo->pInterfaceIo->pfnGetSize(pVDIo->pInterfaceIo->Core.pvUser,
                                             pIoStorage->pStorage, &cbSizeCur);
        if (RT_SUCCESS(rc))
        {
            if (cbSizeCur < cbSize)
            {
                /* Grow: append zero-filled chunks until the target size is reached. */
                const size_t cbBuf = 128 * _1K;
                void *pvBuf = RTMemTmpAllocZ(cbBuf);
                if (RT_LIKELY(pvBuf))
                {
                    uint64_t cbFill = cbSize - cbSizeCur;
                    uint64_t uOff = 0;

                    /* Write data to all blocks. */
                    while (   uOff < cbFill
                           && RT_SUCCESS(rc))
                    {
                        size_t cbChunk = (size_t)RT_MIN(cbFill - uOff, cbBuf);

                        rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
                                                               pIoStorage->pStorage, cbSizeCur + uOff,
                                                               pvBuf, cbChunk, NULL);
                        if (RT_SUCCESS(rc))
                        {
                            uOff += cbChunk;

                            /* cbFill > 0 here because cbSizeCur < cbSize. */
                            rc = vdIfProgress(pIfProgress, uPercentStart + uOff * uPercentSpan / cbFill);
                        }
                    }

                    RTMemTmpFree(pvBuf);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (cbSizeCur > cbSize)
                /* Shrink: a plain truncation suffices. */
                rc = pVDIo->pInterfaceIo->pfnSetSize(pVDIo->pInterfaceIo->Core.pvUser,
                                                     pIoStorage->pStorage, cbSize);
        }
    }

    if (RT_SUCCESS(rc))
        rc = vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);

    return rc;
}
4753
/**
 * VD I/O interface callback for reading user data.
 *
 * For synchronous contexts the read is performed inline through
 * pfnReadSync; for asynchronous contexts the scatter/gather buffer is split
 * into I/O tasks handed to pfnReadAsync.
 *
 * @returns VBox status code; VERR_VD_ASYNC_IO_IN_PROGRESS when tasks are
 *          still in flight.
 * @param   pvUser      Pointer to the VDIO instance.
 * @param   pIoStorage  Storage to read from.
 * @param   uOffset     Byte offset to start reading at.
 * @param   pIoCtx      I/O context providing the destination S/G buffer.
 * @param   cbRead      Number of bytes to read (must be > 0).
 */
static DECLCALLBACK(int) vdIOIntReadUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                         PVDIOCTX pIoCtx, size_t cbRead)
{
    int rc = VINF_SUCCESS;
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbRead=%u\n",
                 pvUser, pIoStorage, uOffset, pIoCtx, cbRead));

    /** @todo Enable check for sync I/O later. */
    if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    Assert(cbRead > 0);

    if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        RTSGSEG Seg;
        unsigned cSegments = 1;
        size_t cbTaskRead = 0;

        /* Synchronous I/O contexts only have one buffer segment. */
        AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
                        ("Invalid number of buffer segments for synchronous I/O context"),
                        VERR_INVALID_PARAMETER);

        cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbRead);
        Assert(cbRead == cbTaskRead);
        Assert(cSegments == 1);
        rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
                                              pIoStorage->pStorage, uOffset,
                                              Seg.pvSeg, cbRead, NULL);
        if (RT_SUCCESS(rc))
        {
            Assert(cbRead == (uint32_t)cbRead);
            ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbRead);
        }
    }
    else
    {
        /* Build the S/G array and spawn a new I/O task */
        while (cbRead)
        {
            RTSGSEG  aSeg[VD_IO_TASK_SEGMENTS_MAX];
            unsigned cSegments  = VD_IO_TASK_SEGMENTS_MAX;
            size_t   cbTaskRead = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbRead);

            Assert(cSegments > 0);
            Assert(cbTaskRead > 0);
            AssertMsg(cbTaskRead <= cbRead, ("Invalid number of bytes to read\n"));

            LogFlow(("Reading %u bytes into %u segments\n", cbTaskRead, cSegments));

#ifdef RT_STRICT
            /* Every segment must be non-NULL and sector (512 byte) aligned in size. */
            for (unsigned i = 0; i < cSegments; i++)
                AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
                          ("Segment %u is invalid\n", i));
#endif

            Assert(cbTaskRead == (uint32_t)cbTaskRead);
            PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, NULL, NULL, pIoCtx, (uint32_t)cbTaskRead);

            if (!pIoTask)
                return VERR_NO_MEMORY;

            ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);

            void *pvTask;
            Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
            rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                   pIoStorage->pStorage, uOffset,
                                                   aSeg, cSegments, cbTaskRead, pIoTask,
                                                   &pvTask);
            if (RT_SUCCESS(rc))
            {
                /* Completed synchronously: account the bytes and drop the task. */
                AssertMsg(cbTaskRead <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
                ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskRead);
                ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
            }
            else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Hard error: undo the pending counter and abort the loop. */
                ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
                break;
            }

            uOffset += cbTaskRead;
            cbRead  -= cbTaskRead;
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
4850
4851static DECLCALLBACK(int) vdIOIntWriteUser(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
4852 PVDIOCTX pIoCtx, size_t cbWrite, PFNVDXFERCOMPLETED pfnComplete,
4853 void *pvCompleteUser)
4854{
4855 int rc = VINF_SUCCESS;
4856 PVDIO pVDIo = (PVDIO)pvUser;
4857 PVBOXHDD pDisk = pVDIo->pDisk;
4858
4859 LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pIoCtx=%#p cbWrite=%u\n",
4860 pvUser, pIoStorage, uOffset, pIoCtx, cbWrite));
4861
4862 /** @todo Enable check for sync I/O later. */
4863 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
4864 VD_IS_LOCKED(pDisk);
4865
4866 Assert(cbWrite > 0);
4867
4868 if (pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
4869 {
4870 RTSGSEG Seg;
4871 unsigned cSegments = 1;
4872 size_t cbTaskWrite = 0;
4873
4874 /* Synchronous I/O contexts only have one buffer segment. */
4875 AssertMsgReturn(pIoCtx->Req.Io.SgBuf.cSegs == 1,
4876 ("Invalid number of buffer segments for synchronous I/O context"),
4877 VERR_INVALID_PARAMETER);
4878
4879 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, &Seg, &cSegments, cbWrite);
4880 Assert(cbWrite == cbTaskWrite);
4881 Assert(cSegments == 1);
4882 rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
4883 pIoStorage->pStorage, uOffset,
4884 Seg.pvSeg, cbWrite, NULL);
4885 if (RT_SUCCESS(rc))
4886 {
4887 Assert(pIoCtx->Req.Io.cbTransferLeft >= cbWrite);
4888 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbWrite);
4889 }
4890 }
4891 else
4892 {
4893 /* Build the S/G array and spawn a new I/O task */
4894 while (cbWrite)
4895 {
4896 RTSGSEG aSeg[VD_IO_TASK_SEGMENTS_MAX];
4897 unsigned cSegments = VD_IO_TASK_SEGMENTS_MAX;
4898 size_t cbTaskWrite = 0;
4899
4900 cbTaskWrite = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, aSeg, &cSegments, cbWrite);
4901
4902 Assert(cSegments > 0);
4903 Assert(cbTaskWrite > 0);
4904 AssertMsg(cbTaskWrite <= cbWrite, ("Invalid number of bytes to write\n"));
4905
4906 LogFlow(("Writing %u bytes from %u segments\n", cbTaskWrite, cSegments));
4907
4908#ifdef DEBUG
4909 for (unsigned i = 0; i < cSegments; i++)
4910 AssertMsg(aSeg[i].pvSeg && !(aSeg[i].cbSeg % 512),
4911 ("Segment %u is invalid\n", i));
4912#endif
4913
4914 Assert(cbTaskWrite == (uint32_t)cbTaskWrite);
4915 PVDIOTASK pIoTask = vdIoTaskUserAlloc(pIoStorage, pfnComplete, pvCompleteUser, pIoCtx, (uint32_t)cbTaskWrite);
4916
4917 if (!pIoTask)
4918 return VERR_NO_MEMORY;
4919
4920 ASMAtomicIncU32(&pIoCtx->cDataTransfersPending);
4921
4922 void *pvTask;
4923 Log(("Spawning pIoTask=%p pIoCtx=%p\n", pIoTask, pIoCtx));
4924 rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
4925 pIoStorage->pStorage,
4926 uOffset, aSeg, cSegments,
4927 cbTaskWrite, pIoTask, &pvTask);
4928 if (RT_SUCCESS(rc))
4929 {
4930 AssertMsg(cbTaskWrite <= pIoCtx->Req.Io.cbTransferLeft, ("Impossible!\n"));
4931 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbTaskWrite);
4932 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4933 vdIoTaskFree(pDisk, pIoTask);
4934 }
4935 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4936 {
4937 ASMAtomicDecU32(&pIoCtx->cDataTransfersPending);
4938 vdIoTaskFree(pDisk, pIoTask);
4939 break;
4940 }
4941
4942 uOffset += cbTaskWrite;
4943 cbWrite -= cbTaskWrite;
4944 }
4945 }
4946
4947 LogFlowFunc(("returns rc=%Rrc\n", rc));
4948 return rc;
4949}
4950
/**
 * VD I/O interface callback for reading metadata.
 *
 * Synchronous requests (no pIoCtx or a sync context) read inline.  For
 * asynchronous requests a cached meta transfer is looked up in the per-storage
 * AVL tree; if none exists a new one is allocated and an async read is
 * started.  Contexts arriving while the read is in flight are parked on the
 * transfer's waiting list (signalled by VERR_VD_NOT_ENOUGH_METADATA).
 *
 * @returns VBox status code.
 * @param   pvUser          Pointer to the VDIO instance.
 * @param   pIoStorage      Storage to read the metadata from.
 * @param   uOffset         Byte offset of the metadata.
 * @param   pvBuf           Destination buffer.
 * @param   cbRead          Number of bytes to read.
 * @param   pIoCtx          I/O context, NULL for purely synchronous reads.
 * @param   ppMetaXfer      Where to return the meta transfer handle (async only).
 * @param   pfnComplete     Optional completion callback.
 * @param   pvCompleteUser  Opaque user argument for pfnComplete.
 */
static DECLCALLBACK(int) vdIOIntReadMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                         void *pvBuf, size_t cbRead, PVDIOCTX pIoCtx,
                                         PPVDMETAXFER ppMetaXfer, PFNVDXFERCOMPLETED pfnComplete,
                                         void *pvCompleteUser)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;
    int rc = VINF_SUCCESS;
    RTSGSEG Seg;
    PVDIOTASK pIoTask;
    PVDMETAXFER pMetaXfer = NULL;
    void *pvTask = NULL;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbRead=%u\n",
                 pvUser, pIoStorage, uOffset, pvBuf, cbRead));

    AssertMsgReturn(   pIoCtx
                    || (!ppMetaXfer && !pfnComplete && !pvCompleteUser),
                    ("A synchronous metadata read is requested but the parameters are wrong\n"),
                    VERR_INVALID_POINTER);

    /** @todo Enable check for sync I/O later. */
    if (   pIoCtx
        && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    if (   !pIoCtx
        || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        /* Handle synchronous metadata I/O. */
        /** @todo Integrate with metadata transfers below. */
        rc = pVDIo->pInterfaceIo->pfnReadSync(pVDIo->pInterfaceIo->Core.pvUser,
                                              pIoStorage->pStorage, uOffset,
                                              pvBuf, cbRead, NULL);
        if (ppMetaXfer)
            *ppMetaXfer = NULL;
    }
    else
    {
        /* Look for a cached transfer covering exactly this offset. */
        pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
        if (!pMetaXfer)
        {
#ifdef RT_STRICT
            /* Strict builds verify that no other cached transfer overlaps this range. */
            pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGetBestFit(pIoStorage->pTreeMetaXfers, uOffset, false /* fAbove */);
            AssertMsg(!pMetaXfer || (pMetaXfer->Core.Key + (RTFOFF)pMetaXfer->cbMeta <= (RTFOFF)uOffset),
                      ("Overlapping meta transfers!\n"));
#endif

            /* Allocate a new meta transfer. */
            pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbRead);
            if (!pMetaXfer)
                return VERR_NO_MEMORY;

            pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
            if (!pIoTask)
            {
                RTMemFree(pMetaXfer);
                return VERR_NO_MEMORY;
            }

            Seg.cbSeg = cbRead;
            Seg.pvSeg = pMetaXfer->abData;

            VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_READ);
            rc = pVDIo->pInterfaceIo->pfnReadAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                   pIoStorage->pStorage,
                                                   uOffset, &Seg, 1,
                                                   cbRead, pIoTask, &pvTask);

            if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Cache the transfer (completed or in flight) in the tree. */
                bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
                Assert(fInserted); NOREF(fInserted);
            }
            else
                RTMemFree(pMetaXfer);

            if (RT_SUCCESS(rc))
            {
                VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
                vdIoTaskFree(pDisk, pIoTask);
            }
            else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS && !pfnComplete)
                rc = VERR_VD_NOT_ENOUGH_METADATA;
        }

        Assert(VALID_PTR(pMetaXfer) || RT_FAILURE(rc));

        if (RT_SUCCESS(rc) || rc == VERR_VD_NOT_ENOUGH_METADATA || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            /* If it is pending add the request to the list. */
            if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_READ)
            {
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                /* NOTE(review): allocation failure is only asserted, not handled -
                 * a NULL pDeferred would be dereferenced below; consider returning
                 * VERR_NO_MEMORY instead - TODO confirm intended policy. */
                AssertPtr(pDeferred);

                RTListInit(&pDeferred->NodeDeferred);
                pDeferred->pIoCtx = pIoCtx;

                ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
                RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
                rc = VERR_VD_NOT_ENOUGH_METADATA;
            }
            else
            {
                /* Transfer the data. */
                pMetaXfer->cRefs++;
                Assert(pMetaXfer->cbMeta >= cbRead);
                Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
                /* Prefer the shadow buffer: it holds newer data queued for write-back. */
                if (pMetaXfer->pbDataShw)
                    memcpy(pvBuf, pMetaXfer->pbDataShw, cbRead);
                else
                    memcpy(pvBuf, pMetaXfer->abData, cbRead);
                *ppMetaXfer = pMetaXfer;
            }
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
5072
/**
 * VD I/O interface callback for writing metadata.
 *
 * Synchronous requests (no I/O context, or a context with VDIOCTX_FLAGS_SYNC)
 * are forwarded directly to pfnWriteSync.  Asynchronous requests are tracked
 * through a VDMETAXFER structure kept in the per-storage AVL tree (keyed by
 * file offset) so that concurrent accesses to the same metadata range are
 * serialized; writes racing with an in-flight write are staged in a shadow
 * buffer and replayed when the current transfer completes.
 *
 * @returns VBox status code, VERR_VD_ASYNC_IO_IN_PROGRESS if the write was queued.
 * @param   pvUser          Opaque user data (PVDIO).
 * @param   pIoStorage      Storage handle to write to.
 * @param   uOffset         Byte offset of the metadata in the file.
 * @param   pvBuf           The metadata to write.
 * @param   cbWrite         Number of bytes to write.
 * @param   pIoCtx          I/O context of the request, NULL for synchronous writes.
 * @param   pfnComplete     Completion callback, optional.
 * @param   pvCompleteUser  Opaque user data for the completion callback.
 */
static DECLCALLBACK(int) vdIOIntWriteMeta(void *pvUser, PVDIOSTORAGE pIoStorage, uint64_t uOffset,
                                          const void *pvBuf, size_t cbWrite, PVDIOCTX pIoCtx,
                                          PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;
    int rc = VINF_SUCCESS;
    RTSGSEG Seg;
    PVDIOTASK pIoTask;
    PVDMETAXFER pMetaXfer = NULL;
    bool fInTree = false;           /* Set when pMetaXfer was found in (and is still linked into) the AVL tree. */
    void *pvTask = NULL;

    LogFlowFunc(("pvUser=%#p pIoStorage=%#p uOffset=%llu pvBuf=%#p cbWrite=%u\n",
                 pvUser, pIoStorage, uOffset, pvBuf, cbWrite));

    /* A completion callback only makes sense for asynchronous (pIoCtx != NULL) requests. */
    AssertMsgReturn(   pIoCtx
                    || (!pfnComplete && !pvCompleteUser),
                    ("A synchronous metadata write is requested but the parameters are wrong\n"),
                    VERR_INVALID_POINTER);

    /** @todo Enable check for sync I/O later. */
    if (   pIoCtx
        && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
        VD_IS_LOCKED(pDisk);

    if (   !pIoCtx
        || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
    {
        /* Handle synchronous metadata I/O. */
        /** @todo Integrate with metadata transfers below. */
        rc = pVDIo->pInterfaceIo->pfnWriteSync(pVDIo->pInterfaceIo->Core.pvUser,
                                               pIoStorage->pStorage, uOffset,
                                               pvBuf, cbWrite, NULL);
    }
    else
    {
        /* Look for an existing transfer covering this offset; reuse it if found. */
        pMetaXfer = (PVDMETAXFER)RTAvlrFileOffsetGet(pIoStorage->pTreeMetaXfers, uOffset);
        if (!pMetaXfer)
        {
            /* Allocate a new meta transfer. */
            pMetaXfer = vdMetaXferAlloc(pIoStorage, uOffset, cbWrite);
            if (!pMetaXfer)
                return VERR_NO_MEMORY;
        }
        else
        {
            Assert(pMetaXfer->cbMeta >= cbWrite);
            Assert(pMetaXfer->Core.Key == (RTFOFF)uOffset);
            fInTree = true;
        }

        if (VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
        {
            /* No transfer in flight for this range - start the write now. */
            pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvCompleteUser, pMetaXfer);
            if (!pIoTask)
            {
                /* NOTE(review): if fInTree is true, pMetaXfer is freed here while
                 * still linked into pTreeMetaXfers and possibly referenced
                 * (cRefs > 0) - looks like a dangling node; confirm. */
                RTMemFree(pMetaXfer);
                return VERR_NO_MEMORY;
            }

            /* Stage the caller's data in the transfer buffer; the async write
             * reads from there, not from pvBuf. */
            memcpy(pMetaXfer->abData, pvBuf, cbWrite);
            Seg.cbSeg = cbWrite;
            Seg.pvSeg = pMetaXfer->abData;

            ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);

            VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_WRITE);
            rc = pVDIo->pInterfaceIo->pfnWriteAsync(pVDIo->pInterfaceIo->Core.pvUser,
                                                    pIoStorage->pStorage,
                                                    uOffset, &Seg, 1, cbWrite, pIoTask,
                                                    &pvTask);
            if (RT_SUCCESS(rc))
            {
                /* Completed synchronously: undo the pending count and drop the
                 * transfer again if nobody else holds a reference. */
                VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
                ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
                vdIoTaskFree(pDisk, pIoTask);
                if (fInTree && !pMetaXfer->cRefs)
                {
                    LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
                    bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
                    AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
                    RTMemFree(pMetaXfer);
                    pMetaXfer = NULL;
                }
            }
            else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            {
                /* Queued: park the I/O context on the transfer's waiting list
                 * until the backend signals completion. */
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                AssertPtr(pDeferred); /* NOTE(review): unchecked allocation is dereferenced below in release builds. */

                RTListInit(&pDeferred->NodeDeferred);
                pDeferred->pIoCtx = pIoCtx;

                if (!fInTree)
                {
                    bool fInserted = RTAvlrFileOffsetInsert(pIoStorage->pTreeMetaXfers, &pMetaXfer->Core);
                    Assert(fInserted); NOREF(fInserted);
                }

                RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
            }
            else
            {
                /* NOTE(review): on genuine failure the pending counter bumped
                 * above is not decremented and pIoTask is not freed; when
                 * fInTree is true the node is also freed while still linked
                 * into the AVL tree - verify against upstream fixes. */
                RTMemFree(pMetaXfer);
                pMetaXfer = NULL;
            }
        }
        else
        {
            /* I/O is in progress, update shadow buffer and add to waiting list. */
            Assert(VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
            if (!pMetaXfer->pbDataShw)
            {
                /* Allocate shadow buffer and set initial state. */
                LogFlowFunc(("pMetaXfer=%#p Creating shadow buffer\n", pMetaXfer));
                pMetaXfer->pbDataShw = (uint8_t *)RTMemAlloc(pMetaXfer->cbMeta);
                if (RT_LIKELY(pMetaXfer->pbDataShw))
                    memcpy(pMetaXfer->pbDataShw, pMetaXfer->abData, pMetaXfer->cbMeta);
                else
                    rc = VERR_NO_MEMORY;
            }

            if (RT_SUCCESS(rc))
            {
                /* Update with written data and append to waiting list. */
                PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
                if (pDeferred)
                {
                    LogFlowFunc(("pMetaXfer=%#p Updating shadow buffer\n", pMetaXfer));

                    RTListInit(&pDeferred->NodeDeferred);
                    pDeferred->pIoCtx = pIoCtx;
                    ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
                    memcpy(pMetaXfer->pbDataShw, pvBuf, cbWrite);
                    RTListAppend(&pMetaXfer->ListIoCtxShwWrites, &pDeferred->NodeDeferred);
                }
                else
                {
                    /*
                     * Free shadow buffer if there is no one depending on it, i.e.
                     * we just allocated it.
                     */
                    if (RTListIsEmpty(&pMetaXfer->ListIoCtxShwWrites))
                    {
                        RTMemFree(pMetaXfer->pbDataShw);
                        pMetaXfer->pbDataShw = NULL;
                    }
                    rc = VERR_NO_MEMORY;
                }
            }
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
5230
5231static DECLCALLBACK(void) vdIOIntMetaXferRelease(void *pvUser, PVDMETAXFER pMetaXfer)
5232{
5233 PVDIO pVDIo = (PVDIO)pvUser;
5234 PVBOXHDD pDisk = pVDIo->pDisk;
5235 PVDIOSTORAGE pIoStorage;
5236
5237 /*
5238 * It is possible that we get called with a NULL metadata xfer handle
5239 * for synchronous I/O. Just exit.
5240 */
5241 if (!pMetaXfer)
5242 return;
5243
5244 pIoStorage = pMetaXfer->pIoStorage;
5245
5246 VD_IS_LOCKED(pDisk);
5247
5248 Assert( VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE
5249 || VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_WRITE);
5250 Assert(pMetaXfer->cRefs > 0);
5251
5252 pMetaXfer->cRefs--;
5253 if ( !pMetaXfer->cRefs
5254 && RTListIsEmpty(&pMetaXfer->ListIoCtxWaiting)
5255 && VDMETAXFER_TXDIR_GET(pMetaXfer->fFlags) == VDMETAXFER_TXDIR_NONE)
5256 {
5257 /* Free the meta data entry. */
5258 LogFlow(("Removing meta xfer=%#p\n", pMetaXfer));
5259 bool fRemoved = RTAvlrFileOffsetRemove(pIoStorage->pTreeMetaXfers, pMetaXfer->Core.Key) != NULL;
5260 AssertMsg(fRemoved, ("Metadata transfer wasn't removed\n")); NOREF(fRemoved);
5261
5262 RTMemFree(pMetaXfer);
5263 }
5264}
5265
5266static DECLCALLBACK(int) vdIOIntFlush(void *pvUser, PVDIOSTORAGE pIoStorage, PVDIOCTX pIoCtx,
5267 PFNVDXFERCOMPLETED pfnComplete, void *pvCompleteUser)
5268{
5269 PVDIO pVDIo = (PVDIO)pvUser;
5270 PVBOXHDD pDisk = pVDIo->pDisk;
5271 int rc = VINF_SUCCESS;
5272 PVDIOTASK pIoTask;
5273 PVDMETAXFER pMetaXfer = NULL;
5274 void *pvTask = NULL;
5275
5276 LogFlowFunc(("pvUser=%#p pIoStorage=%#p pIoCtx=%#p\n",
5277 pvUser, pIoStorage, pIoCtx));
5278
5279 AssertMsgReturn( pIoCtx
5280 || (!pfnComplete && !pvCompleteUser),
5281 ("A synchronous metadata write is requested but the parameters are wrong\n"),
5282 VERR_INVALID_POINTER);
5283
5284 /** @todo Enable check for sync I/O later. */
5285 if ( pIoCtx
5286 && !(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5287 VD_IS_LOCKED(pDisk);
5288
5289 if (pVDIo->fIgnoreFlush)
5290 return VINF_SUCCESS;
5291
5292 if ( !pIoCtx
5293 || pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC)
5294 {
5295 /* Handle synchronous flushes. */
5296 /** @todo Integrate with metadata transfers below. */
5297 rc = pVDIo->pInterfaceIo->pfnFlushSync(pVDIo->pInterfaceIo->Core.pvUser,
5298 pIoStorage->pStorage);
5299 }
5300 else
5301 {
5302 /* Allocate a new meta transfer. */
5303 pMetaXfer = vdMetaXferAlloc(pIoStorage, 0, 0);
5304 if (!pMetaXfer)
5305 return VERR_NO_MEMORY;
5306
5307 pIoTask = vdIoTaskMetaAlloc(pIoStorage, pfnComplete, pvUser, pMetaXfer);
5308 if (!pIoTask)
5309 {
5310 RTMemFree(pMetaXfer);
5311 return VERR_NO_MEMORY;
5312 }
5313
5314 ASMAtomicIncU32(&pIoCtx->cMetaTransfersPending);
5315
5316 PVDIOCTXDEFERRED pDeferred = (PVDIOCTXDEFERRED)RTMemAllocZ(sizeof(VDIOCTXDEFERRED));
5317 AssertPtr(pDeferred);
5318
5319 RTListInit(&pDeferred->NodeDeferred);
5320 pDeferred->pIoCtx = pIoCtx;
5321
5322 RTListAppend(&pMetaXfer->ListIoCtxWaiting, &pDeferred->NodeDeferred);
5323 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_FLUSH);
5324 rc = pVDIo->pInterfaceIo->pfnFlushAsync(pVDIo->pInterfaceIo->Core.pvUser,
5325 pIoStorage->pStorage,
5326 pIoTask, &pvTask);
5327 if (RT_SUCCESS(rc))
5328 {
5329 VDMETAXFER_TXDIR_SET(pMetaXfer->fFlags, VDMETAXFER_TXDIR_NONE);
5330 ASMAtomicDecU32(&pIoCtx->cMetaTransfersPending);
5331 vdIoTaskFree(pDisk, pIoTask);
5332 RTMemFree(pDeferred);
5333 RTMemFree(pMetaXfer);
5334 }
5335 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
5336 RTMemFree(pMetaXfer);
5337 }
5338
5339 LogFlowFunc(("returns rc=%Rrc\n", rc));
5340 return rc;
5341}
5342
5343static DECLCALLBACK(size_t) vdIOIntIoCtxCopyTo(void *pvUser, PVDIOCTX pIoCtx,
5344 const void *pvBuf, size_t cbBuf)
5345{
5346 PVDIO pVDIo = (PVDIO)pvUser;
5347 PVBOXHDD pDisk = pVDIo->pDisk;
5348 size_t cbCopied = 0;
5349
5350 /** @todo Enable check for sync I/O later. */
5351 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5352 VD_IS_LOCKED(pDisk);
5353
5354 cbCopied = vdIoCtxCopyTo(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5355 Assert(cbCopied == cbBuf);
5356
5357 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCopied); - triggers with vdCopyHelper/dmgRead.
5358 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5359
5360 return cbCopied;
5361}
5362
5363static DECLCALLBACK(size_t) vdIOIntIoCtxCopyFrom(void *pvUser, PVDIOCTX pIoCtx,
5364 void *pvBuf, size_t cbBuf)
5365{
5366 PVDIO pVDIo = (PVDIO)pvUser;
5367 PVBOXHDD pDisk = pVDIo->pDisk;
5368 size_t cbCopied = 0;
5369
5370 /** @todo Enable check for sync I/O later. */
5371 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5372 VD_IS_LOCKED(pDisk);
5373
5374 cbCopied = vdIoCtxCopyFrom(pIoCtx, (uint8_t *)pvBuf, cbBuf);
5375 Assert(cbCopied == cbBuf);
5376
5377 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft > cbCopied); - triggers with vdCopyHelper/dmgRead.
5378 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCopied);
5379
5380 return cbCopied;
5381}
5382
5383static DECLCALLBACK(size_t) vdIOIntIoCtxSet(void *pvUser, PVDIOCTX pIoCtx, int ch, size_t cb)
5384{
5385 PVDIO pVDIo = (PVDIO)pvUser;
5386 PVBOXHDD pDisk = pVDIo->pDisk;
5387 size_t cbSet = 0;
5388
5389 /** @todo Enable check for sync I/O later. */
5390 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5391 VD_IS_LOCKED(pDisk);
5392
5393 cbSet = vdIoCtxSet(pIoCtx, ch, cb);
5394 Assert(cbSet == cb);
5395
5396 /// @todo Assert(pIoCtx->Req.Io.cbTransferLeft >= cbSet); - triggers with vdCopyHelper/dmgRead.
5397 ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbSet);
5398
5399 return cbSet;
5400}
5401
5402static DECLCALLBACK(size_t) vdIOIntIoCtxSegArrayCreate(void *pvUser, PVDIOCTX pIoCtx,
5403 PRTSGSEG paSeg, unsigned *pcSeg,
5404 size_t cbData)
5405{
5406 PVDIO pVDIo = (PVDIO)pvUser;
5407 PVBOXHDD pDisk = pVDIo->pDisk;
5408 size_t cbCreated = 0;
5409
5410 /** @todo It is possible that this gets called from a filter plugin
5411 * outside of the disk lock. Refine assertion or remove completely. */
5412#if 0
5413 /** @todo Enable check for sync I/O later. */
5414 if (!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC))
5415 VD_IS_LOCKED(pDisk);
5416#else
5417 NOREF(pDisk);
5418#endif
5419
5420 cbCreated = RTSgBufSegArrayCreate(&pIoCtx->Req.Io.SgBuf, paSeg, pcSeg, cbData);
5421 Assert(!paSeg || cbData == cbCreated);
5422
5423 return cbCreated;
5424}
5425
/**
 * VD I/O interface callback invoked by image backends when part of an I/O
 * context's transfer has completed.
 *
 * Records the (first) error status, updates the remaining-transfer counter,
 * advances the transfer function chain when the current step is done and
 * queues the context for continuation under the disk lock.
 *
 * @param   pvUser       Opaque user data (PVDIO).
 * @param   pIoCtx       The I/O context the completion belongs to.
 * @param   rcReq        Status code of the completed request part.
 * @param   cbCompleted  Number of bytes that completed.
 */
static DECLCALLBACK(void) vdIOIntIoCtxCompleted(void *pvUser, PVDIOCTX pIoCtx, int rcReq,
                                                size_t cbCompleted)
{
    PVDIO pVDIo = (PVDIO)pvUser;
    PVBOXHDD pDisk = pVDIo->pDisk;

    LogFlowFunc(("pvUser=%#p pIoCtx=%#p rcReq=%Rrc cbCompleted=%zu\n",
                 pvUser, pIoCtx, rcReq, cbCompleted));

    /*
     * Grab the disk critical section to avoid races with other threads which
     * might still modify the I/O context.
     * Example is that iSCSI is doing an asynchronous write but calls us already
     * while the other thread is still hanging in vdWriteHelperAsync and couldn't update
     * the blocked state yet.
     * It can overwrite the state to true before we call vdIoCtxContinue and the
     * the request would hang indefinite.
     */
    /* Latch only the first failure: the CAS replaces rcReq solely while it is
     * still VINF_SUCCESS, so later errors don't overwrite the original one. */
    ASMAtomicCmpXchgS32(&pIoCtx->rcReq, rcReq, VINF_SUCCESS);
    Assert(pIoCtx->Req.Io.cbTransferLeft >= cbCompleted);
    ASMAtomicSubU32(&pIoCtx->Req.Io.cbTransferLeft, (uint32_t)cbCompleted);

    /* Set next transfer function if the current one finished.
     * @todo: Find a better way to prevent vdIoCtxContinue from calling the current helper again. */
    if (!pIoCtx->Req.Io.cbTransferLeft)
    {
        pIoCtx->pfnIoCtxTransfer = pIoCtx->pfnIoCtxTransferNext;
        pIoCtx->pfnIoCtxTransferNext = NULL;
    }

    /* Park the context on the halted list; whoever holds (or now takes) the
     * disk lock is responsible for processing it. */
    vdIoCtxAddToWaitingList(&pDisk->pIoCtxHaltedHead, pIoCtx);
    if (ASMAtomicCmpXchgBool(&pDisk->fLocked, true, false))
    {
        /* Immediately drop the lock again, it will take care of processing the list. */
        vdDiskUnlock(pDisk, NULL);
    }
}
5463
5464static DECLCALLBACK(bool) vdIOIntIoCtxIsSynchronous(void *pvUser, PVDIOCTX pIoCtx)
5465{
5466 NOREF(pvUser);
5467 return !!(pIoCtx->fFlags & VDIOCTX_FLAGS_SYNC);
5468}
5469
5470static DECLCALLBACK(bool) vdIOIntIoCtxIsZero(void *pvUser, PVDIOCTX pIoCtx, size_t cbCheck,
5471 bool fAdvance)
5472{
5473 NOREF(pvUser);
5474
5475 bool fIsZero = RTSgBufIsZero(&pIoCtx->Req.Io.SgBuf, cbCheck);
5476 if (fIsZero && fAdvance)
5477 RTSgBufAdvance(&pIoCtx->Req.Io.SgBuf, cbCheck);
5478
5479 return fIsZero;
5480}
5481
5482static DECLCALLBACK(size_t) vdIOIntIoCtxGetDataUnitSize(void *pvUser, PVDIOCTX pIoCtx)
5483{
5484 RT_NOREF1(pIoCtx);
5485 PVDIO pVDIo = (PVDIO)pvUser;
5486 PVBOXHDD pDisk = pVDIo->pDisk;
5487
5488 PVDIMAGE pImage = vdGetImageByNumber(pDisk, VD_LAST_IMAGE);
5489 AssertPtrReturn(pImage, 0);
5490 return pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
5491}
5492
5493/**
5494 * VD I/O interface callback for opening a file (limited version for VDGetFormat).
5495 */
5496static DECLCALLBACK(int) vdIOIntOpenLimited(void *pvUser, const char *pszLocation,
5497 uint32_t fOpen, PPVDIOSTORAGE ppIoStorage)
5498{
5499 int rc = VINF_SUCCESS;
5500 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5501 PVDIOSTORAGE pIoStorage = (PVDIOSTORAGE)RTMemAllocZ(sizeof(VDIOSTORAGE));
5502
5503 if (!pIoStorage)
5504 return VERR_NO_MEMORY;
5505
5506 rc = pInterfaceIo->pfnOpen(NULL, pszLocation, fOpen, NULL, &pIoStorage->pStorage);
5507 if (RT_SUCCESS(rc))
5508 *ppIoStorage = pIoStorage;
5509 else
5510 RTMemFree(pIoStorage);
5511
5512 return rc;
5513}
5514
5515static DECLCALLBACK(int) vdIOIntCloseLimited(void *pvUser, PVDIOSTORAGE pIoStorage)
5516{
5517 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5518 int rc = pInterfaceIo->pfnClose(NULL, pIoStorage->pStorage);
5519
5520 RTMemFree(pIoStorage);
5521 return rc;
5522}
5523
5524static DECLCALLBACK(int) vdIOIntDeleteLimited(void *pvUser, const char *pcszFilename)
5525{
5526 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5527 return pInterfaceIo->pfnDelete(NULL, pcszFilename);
5528}
5529
5530static DECLCALLBACK(int) vdIOIntMoveLimited(void *pvUser, const char *pcszSrc,
5531 const char *pcszDst, unsigned fMove)
5532{
5533 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5534 return pInterfaceIo->pfnMove(NULL, pcszSrc, pcszDst, fMove);
5535}
5536
5537static DECLCALLBACK(int) vdIOIntGetFreeSpaceLimited(void *pvUser, const char *pcszFilename,
5538 int64_t *pcbFreeSpace)
5539{
5540 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5541 return pInterfaceIo->pfnGetFreeSpace(NULL, pcszFilename, pcbFreeSpace);
5542}
5543
5544static DECLCALLBACK(int) vdIOIntGetModificationTimeLimited(void *pvUser,
5545 const char *pcszFilename,
5546 PRTTIMESPEC pModificationTime)
5547{
5548 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5549 return pInterfaceIo->pfnGetModificationTime(NULL, pcszFilename, pModificationTime);
5550}
5551
5552static DECLCALLBACK(int) vdIOIntGetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5553 uint64_t *pcbSize)
5554{
5555 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5556 return pInterfaceIo->pfnGetSize(NULL, pIoStorage->pStorage, pcbSize);
5557}
5558
5559static DECLCALLBACK(int) vdIOIntSetSizeLimited(void *pvUser, PVDIOSTORAGE pIoStorage,
5560 uint64_t cbSize)
5561{
5562 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5563 return pInterfaceIo->pfnSetSize(NULL, pIoStorage->pStorage, cbSize);
5564}
5565
5566static DECLCALLBACK(int) vdIOIntWriteUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5567 uint64_t uOffset, PVDIOCTX pIoCtx,
5568 size_t cbWrite,
5569 PFNVDXFERCOMPLETED pfnComplete,
5570 void *pvCompleteUser)
5571{
5572 NOREF(pvUser);
5573 NOREF(pStorage);
5574 NOREF(uOffset);
5575 NOREF(pIoCtx);
5576 NOREF(cbWrite);
5577 NOREF(pfnComplete);
5578 NOREF(pvCompleteUser);
5579 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5580}
5581
5582static DECLCALLBACK(int) vdIOIntReadUserLimited(void *pvUser, PVDIOSTORAGE pStorage,
5583 uint64_t uOffset, PVDIOCTX pIoCtx,
5584 size_t cbRead)
5585{
5586 NOREF(pvUser);
5587 NOREF(pStorage);
5588 NOREF(uOffset);
5589 NOREF(pIoCtx);
5590 NOREF(cbRead);
5591 AssertMsgFailedReturn(("This needs to be implemented when called\n"), VERR_NOT_IMPLEMENTED);
5592}
5593
5594static DECLCALLBACK(int) vdIOIntWriteMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5595 uint64_t uOffset, const void *pvBuffer,
5596 size_t cbBuffer, PVDIOCTX pIoCtx,
5597 PFNVDXFERCOMPLETED pfnComplete,
5598 void *pvCompleteUser)
5599{
5600 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5601
5602 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5603 ("Async I/O not implemented for the limited interface"),
5604 VERR_NOT_SUPPORTED);
5605
5606 return pInterfaceIo->pfnWriteSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5607}
5608
5609static DECLCALLBACK(int) vdIOIntReadMetaLimited(void *pvUser, PVDIOSTORAGE pStorage,
5610 uint64_t uOffset, void *pvBuffer,
5611 size_t cbBuffer, PVDIOCTX pIoCtx,
5612 PPVDMETAXFER ppMetaXfer,
5613 PFNVDXFERCOMPLETED pfnComplete,
5614 void *pvCompleteUser)
5615{
5616 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5617
5618 AssertMsgReturn(!pIoCtx && !ppMetaXfer && !pfnComplete && !pvCompleteUser,
5619 ("Async I/O not implemented for the limited interface"),
5620 VERR_NOT_SUPPORTED);
5621
5622 return pInterfaceIo->pfnReadSync(NULL, pStorage->pStorage, uOffset, pvBuffer, cbBuffer, NULL);
5623}
5624
#if 0 /* unused */
/**
 * Meta transfer release callback for the limited I/O interface.
 *
 * The limited interface only performs synchronous metadata reads and never
 * hands out transfer handles, so there is nothing to release.
 *
 * @returns VINF_SUCCESS.
 */
static int vdIOIntMetaXferReleaseLimited(void *pvUser, PVDMETAXFER pMetaXfer)
{
    /* This is a NOP in this case. */
    NOREF(pvUser);
    NOREF(pMetaXfer);
    return VINF_SUCCESS;
}
#endif
5634
5635static DECLCALLBACK(int) vdIOIntFlushLimited(void *pvUser, PVDIOSTORAGE pStorage,
5636 PVDIOCTX pIoCtx,
5637 PFNVDXFERCOMPLETED pfnComplete,
5638 void *pvCompleteUser)
5639{
5640 PVDINTERFACEIO pInterfaceIo = (PVDINTERFACEIO)pvUser;
5641
5642 AssertMsgReturn(!pIoCtx && !pfnComplete && !pvCompleteUser,
5643 ("Async I/O not implemented for the limited interface"),
5644 VERR_NOT_SUPPORTED);
5645
5646 return pInterfaceIo->pfnFlushSync(NULL, pStorage->pStorage);
5647}
5648
5649/**
5650 * internal: send output to the log (unconditionally).
5651 */
5652static DECLCALLBACK(int) vdLogMessage(void *pvUser, const char *pszFormat, va_list args)
5653{
5654 NOREF(pvUser);
5655 RTLogPrintfV(pszFormat, args);
5656 return VINF_SUCCESS;
5657}
5658
5659DECLINLINE(int) vdMessageWrapper(PVBOXHDD pDisk, const char *pszFormat, ...)
5660{
5661 va_list va;
5662 va_start(va, pszFormat);
5663 int rc = pDisk->pInterfaceError->pfnMessage(pDisk->pInterfaceError->Core.pvUser,
5664 pszFormat, va);
5665 va_end(va);
5666 return rc;
5667}
5668
5669
5670/**
5671 * internal: adjust PCHS geometry
5672 */
5673static void vdFixupPCHSGeometry(PVDGEOMETRY pPCHS, uint64_t cbSize)
5674{
5675 /* Fix broken PCHS geometry. Can happen for two reasons: either the backend
5676 * mixes up PCHS and LCHS, or the application used to create the source
5677 * image has put garbage in it. Additionally, if the PCHS geometry covers
5678 * more than the image size, set it back to the default. */
5679 if ( pPCHS->cHeads > 16
5680 || pPCHS->cSectors > 63
5681 || pPCHS->cCylinders == 0
5682 || (uint64_t)pPCHS->cHeads * pPCHS->cSectors * pPCHS->cCylinders * 512 > cbSize)
5683 {
5684 Assert(!(RT_MIN(cbSize / 512 / 16 / 63, 16383) - (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383)));
5685 pPCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / 16 / 63, 16383);
5686 pPCHS->cHeads = 16;
5687 pPCHS->cSectors = 63;
5688 }
5689}
5690
5691/**
5692 * internal: adjust PCHS geometry
5693 */
5694static void vdFixupLCHSGeometry(PVDGEOMETRY pLCHS, uint64_t cbSize)
5695{
5696 /* Fix broken LCHS geometry. Can happen for two reasons: either the backend
5697 * mixes up PCHS and LCHS, or the application used to create the source
5698 * image has put garbage in it. The fix in this case is to clear the LCHS
5699 * geometry to trigger autodetection when it is used next. If the geometry
5700 * already says "please autodetect" (cylinders=0) keep it. */
5701 if ( ( pLCHS->cHeads > 255
5702 || pLCHS->cHeads == 0
5703 || pLCHS->cSectors > 63
5704 || pLCHS->cSectors == 0)
5705 && pLCHS->cCylinders != 0)
5706 {
5707 pLCHS->cCylinders = 0;
5708 pLCHS->cHeads = 0;
5709 pLCHS->cSectors = 0;
5710 }
5711 /* Always recompute the number of cylinders stored in the LCHS
5712 * geometry if it isn't set to "autotedetect" at the moment.
5713 * This is very useful if the destination image size is
5714 * larger or smaller than the source image size. Do not modify
5715 * the number of heads and sectors. Windows guests hate it. */
5716 if ( pLCHS->cCylinders != 0
5717 && pLCHS->cHeads != 0 /* paranoia */
5718 && pLCHS->cSectors != 0 /* paranoia */)
5719 {
5720 Assert(!(RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024) - (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024)));
5721 pLCHS->cCylinders = (uint32_t)RT_MIN(cbSize / 512 / pLCHS->cHeads / pLCHS->cSectors, 1024);
5722 }
5723}
5724
5725/**
5726 * Sets the I/O callbacks of the given interface to the fallback methods
5727 *
5728 * @returns nothing.
5729 * @param pIfIo The I/O interface to setup.
5730 */
5731static void vdIfIoFallbackCallbacksSetup(PVDINTERFACEIO pIfIo)
5732{
5733 pIfIo->pfnOpen = vdIOOpenFallback;
5734 pIfIo->pfnClose = vdIOCloseFallback;
5735 pIfIo->pfnDelete = vdIODeleteFallback;
5736 pIfIo->pfnMove = vdIOMoveFallback;
5737 pIfIo->pfnGetFreeSpace = vdIOGetFreeSpaceFallback;
5738 pIfIo->pfnGetModificationTime = vdIOGetModificationTimeFallback;
5739 pIfIo->pfnGetSize = vdIOGetSizeFallback;
5740 pIfIo->pfnSetSize = vdIOSetSizeFallback;
5741 pIfIo->pfnSetAllocationSize = vdIOSetAllocationSizeFallback;
5742 pIfIo->pfnReadSync = vdIOReadSyncFallback;
5743 pIfIo->pfnWriteSync = vdIOWriteSyncFallback;
5744 pIfIo->pfnFlushSync = vdIOFlushSyncFallback;
5745 pIfIo->pfnReadAsync = vdIOReadAsyncFallback;
5746 pIfIo->pfnWriteAsync = vdIOWriteAsyncFallback;
5747 pIfIo->pfnFlushAsync = vdIOFlushAsyncFallback;
5748}
5749
5750/**
5751 * Sets the internal I/O callbacks of the given interface.
5752 *
5753 * @returns nothing.
5754 * @param pIfIoInt The internal I/O interface to setup.
5755 */
5756static void vdIfIoIntCallbacksSetup(PVDINTERFACEIOINT pIfIoInt)
5757{
5758 pIfIoInt->pfnOpen = vdIOIntOpen;
5759 pIfIoInt->pfnClose = vdIOIntClose;
5760 pIfIoInt->pfnDelete = vdIOIntDelete;
5761 pIfIoInt->pfnMove = vdIOIntMove;
5762 pIfIoInt->pfnGetFreeSpace = vdIOIntGetFreeSpace;
5763 pIfIoInt->pfnGetModificationTime = vdIOIntGetModificationTime;
5764 pIfIoInt->pfnGetSize = vdIOIntGetSize;
5765 pIfIoInt->pfnSetSize = vdIOIntSetSize;
5766 pIfIoInt->pfnSetAllocationSize = vdIOIntSetAllocationSize;
5767 pIfIoInt->pfnReadUser = vdIOIntReadUser;
5768 pIfIoInt->pfnWriteUser = vdIOIntWriteUser;
5769 pIfIoInt->pfnReadMeta = vdIOIntReadMeta;
5770 pIfIoInt->pfnWriteMeta = vdIOIntWriteMeta;
5771 pIfIoInt->pfnMetaXferRelease = vdIOIntMetaXferRelease;
5772 pIfIoInt->pfnFlush = vdIOIntFlush;
5773 pIfIoInt->pfnIoCtxCopyFrom = vdIOIntIoCtxCopyFrom;
5774 pIfIoInt->pfnIoCtxCopyTo = vdIOIntIoCtxCopyTo;
5775 pIfIoInt->pfnIoCtxSet = vdIOIntIoCtxSet;
5776 pIfIoInt->pfnIoCtxSegArrayCreate = vdIOIntIoCtxSegArrayCreate;
5777 pIfIoInt->pfnIoCtxCompleted = vdIOIntIoCtxCompleted;
5778 pIfIoInt->pfnIoCtxIsSynchronous = vdIOIntIoCtxIsSynchronous;
5779 pIfIoInt->pfnIoCtxIsZero = vdIOIntIoCtxIsZero;
5780 pIfIoInt->pfnIoCtxGetDataUnitSize = vdIOIntIoCtxGetDataUnitSize;
5781}
5782
5783/**
5784 * Internally used completion handler for synchronous I/O contexts.
5785 */
5786static DECLCALLBACK(void) vdIoCtxSyncComplete(void *pvUser1, void *pvUser2, int rcReq)
5787{
5788 RT_NOREF2(pvUser1, rcReq);
5789 RTSEMEVENT hEvent = (RTSEMEVENT)pvUser2;
5790
5791 RTSemEventSignal(hEvent);
5792}
5793
5794/**
5795 * Initializes HDD backends.
5796 *
5797 * @returns VBox status code.
5798 */
5799VBOXDDU_DECL(int) VDInit(void)
5800{
5801 int rc = vdAddBackends(NIL_RTLDRMOD, aStaticBackends, RT_ELEMENTS(aStaticBackends));
5802 if (RT_SUCCESS(rc))
5803 {
5804 rc = vdAddCacheBackends(NIL_RTLDRMOD, aStaticCacheBackends, RT_ELEMENTS(aStaticCacheBackends));
5805 if (RT_SUCCESS(rc))
5806 {
5807 RTListInit(&g_ListPluginsLoaded);
5808 rc = vdLoadDynamicBackends();
5809 }
5810 }
5811 LogRel(("VD: VDInit finished\n"));
5812 return rc;
5813}
5814
5815/**
5816 * Destroys loaded HDD backends.
5817 *
5818 * @returns VBox status code.
5819 */
5820VBOXDDU_DECL(int) VDShutdown(void)
5821{
5822 if (!g_apBackends)
5823 return VERR_INTERNAL_ERROR;
5824
5825 if (g_apCacheBackends)
5826 RTMemFree(g_apCacheBackends);
5827 RTMemFree(g_apBackends);
5828
5829 g_cBackends = 0;
5830 g_apBackends = NULL;
5831
5832 /* Clear the supported cache backends. */
5833 g_cCacheBackends = 0;
5834 g_apCacheBackends = NULL;
5835
5836#ifndef VBOX_HDD_NO_DYNAMIC_BACKENDS
5837 PVDPLUGIN pPlugin, pPluginNext;
5838
5839 RTListForEachSafe(&g_ListPluginsLoaded, pPlugin, pPluginNext, VDPLUGIN, NodePlugin)
5840 {
5841 RTLdrClose(pPlugin->hPlugin);
5842 RTStrFree(pPlugin->pszFilename);
5843 RTListNodeRemove(&pPlugin->NodePlugin);
5844 RTMemFree(pPlugin);
5845 }
5846#endif
5847
5848 return VINF_SUCCESS;
5849}
5850
5851/**
5852 * Loads a single plugin given by filename.
5853 *
5854 * @returns VBox status code.
5855 * @param pszFilename The plugin filename to load.
5856 */
5857VBOXDDU_DECL(int) VDPluginLoadFromFilename(const char *pszFilename)
5858{
5859 if (!g_apBackends)
5860 {
5861 int rc = VDInit();
5862 if (RT_FAILURE(rc))
5863 return rc;
5864 }
5865
5866 return vdPluginLoadFromFilename(pszFilename);
5867}
5868
5869/**
5870 * Load all plugins from a given path.
5871 *
5872 * @returns VBox statuse code.
5873 * @param pszPath The path to load plugins from.
5874 */
5875VBOXDDU_DECL(int) VDPluginLoadFromPath(const char *pszPath)
5876{
5877 if (!g_apBackends)
5878 {
5879 int rc = VDInit();
5880 if (RT_FAILURE(rc))
5881 return rc;
5882 }
5883
5884 return vdPluginLoadFromPath(pszPath);
5885}
5886
5887/**
5888 * Unloads a single plugin given by filename.
5889 *
5890 * @returns VBox status code.
5891 * @param pszFilename The plugin filename to unload.
5892 */
5893VBOXDDU_DECL(int) VDPluginUnloadFromFilename(const char *pszFilename)
5894{
5895 if (!g_apBackends)
5896 {
5897 int rc = VDInit();
5898 if (RT_FAILURE(rc))
5899 return rc;
5900 }
5901
5902 return vdPluginUnloadFromFilename(pszFilename);
5903}
5904
5905/**
5906 * Unload all plugins from a given path.
5907 *
5908 * @returns VBox statuse code.
5909 * @param pszPath The path to unload plugins from.
5910 */
5911VBOXDDU_DECL(int) VDPluginUnloadFromPath(const char *pszPath)
5912{
5913 if (!g_apBackends)
5914 {
5915 int rc = VDInit();
5916 if (RT_FAILURE(rc))
5917 return rc;
5918 }
5919
5920 return vdPluginUnloadFromPath(pszPath);
5921}
5922
5923/**
5924 * Lists all HDD backends and their capabilities in a caller-provided buffer.
5925 *
5926 * @returns VBox status code.
5927 * VERR_BUFFER_OVERFLOW if not enough space is passed.
5928 * @param cEntriesAlloc Number of list entries available.
5929 * @param pEntries Pointer to array for the entries.
5930 * @param pcEntriesUsed Number of entries returned.
5931 */
5932VBOXDDU_DECL(int) VDBackendInfo(unsigned cEntriesAlloc, PVDBACKENDINFO pEntries,
5933 unsigned *pcEntriesUsed)
5934{
5935 int rc = VINF_SUCCESS;
5936
5937 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
5938 /* Check arguments. */
5939 AssertMsgReturn(cEntriesAlloc,
5940 ("cEntriesAlloc=%u\n", cEntriesAlloc),
5941 VERR_INVALID_PARAMETER);
5942 AssertMsgReturn(VALID_PTR(pEntries),
5943 ("pEntries=%#p\n", pEntries),
5944 VERR_INVALID_PARAMETER);
5945 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
5946 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
5947 VERR_INVALID_PARAMETER);
5948 if (!g_apBackends)
5949 VDInit();
5950
5951 if (cEntriesAlloc < g_cBackends)
5952 {
5953 *pcEntriesUsed = g_cBackends;
5954 return VERR_BUFFER_OVERFLOW;
5955 }
5956
5957 for (unsigned i = 0; i < g_cBackends; i++)
5958 {
5959 pEntries[i].pszBackend = g_apBackends[i]->pszBackendName;
5960 pEntries[i].uBackendCaps = g_apBackends[i]->uBackendCaps;
5961 pEntries[i].paFileExtensions = g_apBackends[i]->paFileExtensions;
5962 pEntries[i].paConfigInfo = g_apBackends[i]->paConfigInfo;
5963 pEntries[i].pfnComposeLocation = g_apBackends[i]->pfnComposeLocation;
5964 pEntries[i].pfnComposeName = g_apBackends[i]->pfnComposeName;
5965 }
5966
5967 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cBackends));
5968 *pcEntriesUsed = g_cBackends;
5969 return rc;
5970}
5971
5972/**
5973 * Lists the capabilities of a backend identified by its name.
5974 *
5975 * @returns VBox status code.
5976 * @param pszBackend The backend name.
5977 * @param pEntry Pointer to an entry.
5978 */
5979VBOXDDU_DECL(int) VDBackendInfoOne(const char *pszBackend, PVDBACKENDINFO pEntry)
5980{
5981 LogFlowFunc(("pszBackend=%#p pEntry=%#p\n", pszBackend, pEntry));
5982 /* Check arguments. */
5983 AssertMsgReturn(VALID_PTR(pszBackend),
5984 ("pszBackend=%#p\n", pszBackend),
5985 VERR_INVALID_PARAMETER);
5986 AssertMsgReturn(VALID_PTR(pEntry),
5987 ("pEntry=%#p\n", pEntry),
5988 VERR_INVALID_PARAMETER);
5989 if (!g_apBackends)
5990 VDInit();
5991
5992 /* Go through loaded backends. */
5993 for (unsigned i = 0; i < g_cBackends; i++)
5994 {
5995 if (!RTStrICmp(pszBackend, g_apBackends[i]->pszBackendName))
5996 {
5997 pEntry->pszBackend = g_apBackends[i]->pszBackendName;
5998 pEntry->uBackendCaps = g_apBackends[i]->uBackendCaps;
5999 pEntry->paFileExtensions = g_apBackends[i]->paFileExtensions;
6000 pEntry->paConfigInfo = g_apBackends[i]->paConfigInfo;
6001 return VINF_SUCCESS;
6002 }
6003 }
6004
6005 return VERR_NOT_FOUND;
6006}
6007
6008/**
6009 * Lists all filters and their capabilities in a caller-provided buffer.
6010 *
6011 * @return VBox status code.
6012 * VERR_BUFFER_OVERFLOW if not enough space is passed.
6013 * @param cEntriesAlloc Number of list entries available.
6014 * @param pEntries Pointer to array for the entries.
6015 * @param pcEntriesUsed Number of entries returned.
6016 */
6017VBOXDDU_DECL(int) VDFilterInfo(unsigned cEntriesAlloc, PVDFILTERINFO pEntries,
6018 unsigned *pcEntriesUsed)
6019{
6020 int rc = VINF_SUCCESS;
6021
6022 LogFlowFunc(("cEntriesAlloc=%u pEntries=%#p pcEntriesUsed=%#p\n", cEntriesAlloc, pEntries, pcEntriesUsed));
6023 /* Check arguments. */
6024 AssertMsgReturn(cEntriesAlloc,
6025 ("cEntriesAlloc=%u\n", cEntriesAlloc),
6026 VERR_INVALID_PARAMETER);
6027 AssertMsgReturn(VALID_PTR(pEntries),
6028 ("pEntries=%#p\n", pEntries),
6029 VERR_INVALID_PARAMETER);
6030 AssertMsgReturn(VALID_PTR(pcEntriesUsed),
6031 ("pcEntriesUsed=%#p\n", pcEntriesUsed),
6032 VERR_INVALID_PARAMETER);
6033 if (!g_apBackends)
6034 VDInit();
6035
6036 if (cEntriesAlloc < g_cFilterBackends)
6037 {
6038 *pcEntriesUsed = g_cFilterBackends;
6039 return VERR_BUFFER_OVERFLOW;
6040 }
6041
6042 for (unsigned i = 0; i < g_cFilterBackends; i++)
6043 {
6044 pEntries[i].pszFilter = g_apFilterBackends[i]->pszBackendName;
6045 pEntries[i].paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6046 }
6047
6048 LogFlowFunc(("returns %Rrc *pcEntriesUsed=%u\n", rc, g_cFilterBackends));
6049 *pcEntriesUsed = g_cFilterBackends;
6050 return rc;
6051}
6052
6053/**
6054 * Lists the capabilities of a filter identified by its name.
6055 *
6056 * @return VBox status code.
6057 * @param pszFilter The filter name (case insensitive).
6058 * @param pEntry Pointer to an entry.
6059 */
6060VBOXDDU_DECL(int) VDFilterInfoOne(const char *pszFilter, PVDFILTERINFO pEntry)
6061{
6062 LogFlowFunc(("pszFilter=%#p pEntry=%#p\n", pszFilter, pEntry));
6063 /* Check arguments. */
6064 AssertMsgReturn(VALID_PTR(pszFilter),
6065 ("pszFilter=%#p\n", pszFilter),
6066 VERR_INVALID_PARAMETER);
6067 AssertMsgReturn(VALID_PTR(pEntry),
6068 ("pEntry=%#p\n", pEntry),
6069 VERR_INVALID_PARAMETER);
6070 if (!g_apBackends)
6071 VDInit();
6072
6073 /* Go through loaded backends. */
6074 for (unsigned i = 0; i < g_cFilterBackends; i++)
6075 {
6076 if (!RTStrICmp(pszFilter, g_apFilterBackends[i]->pszBackendName))
6077 {
6078 pEntry->pszFilter = g_apFilterBackends[i]->pszBackendName;
6079 pEntry->paConfigInfo = g_apFilterBackends[i]->paConfigInfo;
6080 return VINF_SUCCESS;
6081 }
6082 }
6083
6084 return VERR_NOT_FOUND;
6085}
6086
6087/**
6088 * Allocates and initializes an empty HDD container.
6089 * No image files are opened.
6090 *
6091 * @returns VBox status code.
6092 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6093 * @param enmType Type of the image container.
6094 * @param ppDisk Where to store the reference to HDD container.
6095 */
6096VBOXDDU_DECL(int) VDCreate(PVDINTERFACE pVDIfsDisk, VDTYPE enmType, PVBOXHDD *ppDisk)
6097{
6098 int rc = VINF_SUCCESS;
6099 PVBOXHDD pDisk = NULL;
6100
6101 LogFlowFunc(("pVDIfsDisk=%#p\n", pVDIfsDisk));
6102 do
6103 {
6104 /* Check arguments. */
6105 AssertMsgBreakStmt(VALID_PTR(ppDisk),
6106 ("ppDisk=%#p\n", ppDisk),
6107 rc = VERR_INVALID_PARAMETER);
6108
6109 pDisk = (PVBOXHDD)RTMemAllocZ(sizeof(VBOXHDD));
6110 if (pDisk)
6111 {
6112 pDisk->u32Signature = VBOXHDDDISK_SIGNATURE;
6113 pDisk->enmType = enmType;
6114 pDisk->cImages = 0;
6115 pDisk->pBase = NULL;
6116 pDisk->pLast = NULL;
6117 pDisk->cbSize = 0;
6118 pDisk->PCHSGeometry.cCylinders = 0;
6119 pDisk->PCHSGeometry.cHeads = 0;
6120 pDisk->PCHSGeometry.cSectors = 0;
6121 pDisk->LCHSGeometry.cCylinders = 0;
6122 pDisk->LCHSGeometry.cHeads = 0;
6123 pDisk->LCHSGeometry.cSectors = 0;
6124 pDisk->pVDIfsDisk = pVDIfsDisk;
6125 pDisk->pInterfaceError = NULL;
6126 pDisk->pInterfaceThreadSync = NULL;
6127 pDisk->pIoCtxLockOwner = NULL;
6128 pDisk->pIoCtxHead = NULL;
6129 pDisk->fLocked = false;
6130 pDisk->hMemCacheIoCtx = NIL_RTMEMCACHE;
6131 pDisk->hMemCacheIoTask = NIL_RTMEMCACHE;
6132 RTListInit(&pDisk->ListFilterChainWrite);
6133 RTListInit(&pDisk->ListFilterChainRead);
6134
6135 /* Create the I/O ctx cache */
6136 rc = RTMemCacheCreate(&pDisk->hMemCacheIoCtx, sizeof(VDIOCTX), 0, UINT32_MAX,
6137 NULL, NULL, NULL, 0);
6138 if (RT_FAILURE(rc))
6139 break;
6140
6141 /* Create the I/O task cache */
6142 rc = RTMemCacheCreate(&pDisk->hMemCacheIoTask, sizeof(VDIOTASK), 0, UINT32_MAX,
6143 NULL, NULL, NULL, 0);
6144 if (RT_FAILURE(rc))
6145 break;
6146
6147 pDisk->pInterfaceError = VDIfErrorGet(pVDIfsDisk);
6148 pDisk->pInterfaceThreadSync = VDIfThreadSyncGet(pVDIfsDisk);
6149
6150 *ppDisk = pDisk;
6151 }
6152 else
6153 {
6154 rc = VERR_NO_MEMORY;
6155 break;
6156 }
6157 } while (0);
6158
6159 if ( RT_FAILURE(rc)
6160 && pDisk)
6161 {
6162 if (pDisk->hMemCacheIoCtx != NIL_RTMEMCACHE)
6163 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6164 if (pDisk->hMemCacheIoTask != NIL_RTMEMCACHE)
6165 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6166 }
6167
6168 LogFlowFunc(("returns %Rrc (pDisk=%#p)\n", rc, pDisk));
6169 return rc;
6170}
6171
6172/**
6173 * Destroys HDD container.
6174 * If container has opened image files they will be closed.
6175 *
6176 * @returns VBox status code.
6177 * @param pDisk Pointer to HDD container.
6178 */
6179VBOXDDU_DECL(int) VDDestroy(PVBOXHDD pDisk)
6180{
6181 int rc = VINF_SUCCESS;
6182 LogFlowFunc(("pDisk=%#p\n", pDisk));
6183 do
6184 {
6185 /* sanity check */
6186 AssertPtrBreak(pDisk);
6187 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
6188 Assert(!pDisk->fLocked);
6189
6190 rc = VDCloseAll(pDisk);
6191 int rc2 = VDFilterRemoveAll(pDisk);
6192 if (RT_SUCCESS(rc))
6193 rc = rc2;
6194
6195 RTMemCacheDestroy(pDisk->hMemCacheIoCtx);
6196 RTMemCacheDestroy(pDisk->hMemCacheIoTask);
6197 RTMemFree(pDisk);
6198 } while (0);
6199 LogFlowFunc(("returns %Rrc\n", rc));
6200 return rc;
6201}
6202
6203/**
6204 * Try to get the backend name which can use this image.
6205 *
6206 * @returns VBox status code.
6207 * VINF_SUCCESS if a plugin was found.
6208 * ppszFormat contains the string which can be used as backend name.
6209 * VERR_NOT_SUPPORTED if no backend was found.
6210 * @param pVDIfsDisk Pointer to the per-disk VD interface list.
6211 * @param pVDIfsImage Pointer to the per-image VD interface list.
6212 * @param pszFilename Name of the image file for which the backend is queried.
6213 * @param ppszFormat Receives pointer of the UTF-8 string which contains the format name.
6214 * The returned pointer must be freed using RTStrFree().
6215 */
6216VBOXDDU_DECL(int) VDGetFormat(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6217 const char *pszFilename, char **ppszFormat, VDTYPE *penmType)
6218{
6219 int rc = VERR_NOT_SUPPORTED;
6220 VDINTERFACEIOINT VDIfIoInt;
6221 VDINTERFACEIO VDIfIoFallback;
6222 PVDINTERFACEIO pInterfaceIo;
6223
6224 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6225 /* Check arguments. */
6226 AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
6227 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
6228 VERR_INVALID_PARAMETER);
6229 AssertMsgReturn(VALID_PTR(ppszFormat),
6230 ("ppszFormat=%#p\n", ppszFormat),
6231 VERR_INVALID_PARAMETER);
6232 AssertMsgReturn(VALID_PTR(penmType),
6233 ("penmType=%#p\n", penmType),
6234 VERR_INVALID_PARAMETER);
6235
6236 if (!g_apBackends)
6237 VDInit();
6238
6239 pInterfaceIo = VDIfIoGet(pVDIfsImage);
6240 if (!pInterfaceIo)
6241 {
6242 /*
6243 * Caller doesn't provide an I/O interface, create our own using the
6244 * native file API.
6245 */
6246 vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
6247 pInterfaceIo = &VDIfIoFallback;
6248 }
6249
6250 /* Set up the internal I/O interface. */
6251 AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
6252 VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
6253 VDIfIoInt.pfnClose = vdIOIntCloseLimited;
6254 VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
6255 VDIfIoInt.pfnMove = vdIOIntMoveLimited;
6256 VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
6257 VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
6258 VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
6259 VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
6260 VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
6261 VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
6262 VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
6263 VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
6264 VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
6265 rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
6266 pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
6267 AssertRC(rc);
6268
6269 /* Find the backend supporting this file format. */
6270 for (unsigned i = 0; i < g_cBackends; i++)
6271 {
6272 if (g_apBackends[i]->pfnProbe)
6273 {
6274 rc = g_apBackends[i]->pfnProbe(pszFilename, pVDIfsDisk, pVDIfsImage, penmType);
6275 if ( RT_SUCCESS(rc)
6276 /* The correct backend has been found, but there is a small
6277 * incompatibility so that the file cannot be used. Stop here
6278 * and signal success - the actual open will of course fail,
6279 * but that will create a really sensible error message. */
6280 || ( rc != VERR_VD_GEN_INVALID_HEADER
6281 && rc != VERR_VD_VDI_INVALID_HEADER
6282 && rc != VERR_VD_VMDK_INVALID_HEADER
6283 && rc != VERR_VD_ISCSI_INVALID_HEADER
6284 && rc != VERR_VD_VHD_INVALID_HEADER
6285 && rc != VERR_VD_RAW_INVALID_HEADER
6286 && rc != VERR_VD_RAW_SIZE_MODULO_512
6287 && rc != VERR_VD_RAW_SIZE_MODULO_2048
6288 && rc != VERR_VD_RAW_SIZE_OPTICAL_TOO_SMALL
6289 && rc != VERR_VD_RAW_SIZE_FLOPPY_TOO_BIG
6290 && rc != VERR_VD_PARALLELS_INVALID_HEADER
6291 && rc != VERR_VD_DMG_INVALID_HEADER))
6292 {
6293 /* Copy the name into the new string. */
6294 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6295 if (!pszFormat)
6296 {
6297 rc = VERR_NO_MEMORY;
6298 break;
6299 }
6300 *ppszFormat = pszFormat;
6301 /* Do not consider the typical file access errors as success,
6302 * which allows the caller to deal with such issues. */
6303 if ( rc != VERR_ACCESS_DENIED
6304 && rc != VERR_PATH_NOT_FOUND
6305 && rc != VERR_FILE_NOT_FOUND)
6306 rc = VINF_SUCCESS;
6307 break;
6308 }
6309 rc = VERR_NOT_SUPPORTED;
6310 }
6311 }
6312
6313 /* Try the cache backends. */
6314 if (rc == VERR_NOT_SUPPORTED)
6315 {
6316 for (unsigned i = 0; i < g_cCacheBackends; i++)
6317 {
6318 if (g_apCacheBackends[i]->pfnProbe)
6319 {
6320 rc = g_apCacheBackends[i]->pfnProbe(pszFilename, pVDIfsDisk,
6321 pVDIfsImage);
6322 if ( RT_SUCCESS(rc)
6323 || (rc != VERR_VD_GEN_INVALID_HEADER))
6324 {
6325 /* Copy the name into the new string. */
6326 char *pszFormat = RTStrDup(g_apBackends[i]->pszBackendName);
6327 if (!pszFormat)
6328 {
6329 rc = VERR_NO_MEMORY;
6330 break;
6331 }
6332 *ppszFormat = pszFormat;
6333 rc = VINF_SUCCESS;
6334 break;
6335 }
6336 rc = VERR_NOT_SUPPORTED;
6337 }
6338 }
6339 }
6340
6341 LogFlowFunc(("returns %Rrc *ppszFormat=\"%s\"\n", rc, *ppszFormat));
6342 return rc;
6343}
6344
/**
 * Opens an image file.
 *
 * The first opened image file in HDD container must have a base image type,
 * others (next opened images) must be a differencing or undo images.
 * Linkage is checked for differencing image to be in consistency with the previously opened image.
 * When another differencing image is opened and the last image was opened in read/write access
 * mode, then the last image is reopened in read-only with deny write sharing mode. This allows
 * other processes to use images in read-only mode too.
 *
 * Note that the image is opened in read-only mode if a read/write open is not possible.
 * Use VDIsReadOnly to check open mode.
 *
 * @returns VBox status code.
 * @param pDisk Pointer to HDD container.
 * @param pszBackend Name of the image file backend to use.
 * @param pszFilename Name of the image file to open.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsImage Pointer to the per-image VD interface list.
 */
VBOXDDU_DECL(int) VDOpen(PVBOXHDD pDisk, const char *pszBackend,
                         const char *pszFilename, unsigned uOpenFlags,
                         PVDINTERFACE pVDIfsImage)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;  /* Tracks whether we currently hold the disk write lock. */
    PVDIMAGE pImage = NULL;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsImage=%#p\n",
                 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsImage));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* Skipping consistency checks is only permitted for read-only opens. */
        AssertMsgBreakStmt(   !(uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)
                           || (uOpenFlags & VD_OPEN_FLAGS_READONLY),
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /*
         * Destroy the current discard state first which might still have pending blocks
         * for the currently opened image which will be switched to readonly mode.
         */
        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = false;

        /* Set up image descriptor. */
        pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
        if (!pImage)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->pszFilename = RTStrDup(pszFilename);
        if (!pImage->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        pImage->VDIo.pDisk  = pDisk;
        pImage->pVDIfsImage = pVDIfsImage;

        /* Resolve the backend by name; rc failure and NULL backend are distinct cases. */
        rc = vdFindBackend(pszBackend, &pImage->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pImage->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }

        /*
         * Fail if the backend can't do async I/O but the
         * flag is set.
         */
        if (   !(pImage->Backend->uBackendCaps & VD_CAP_ASYNC)
            && (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO))
        {
            rc = vdError(pDisk, VERR_NOT_SUPPORTED, RT_SRC_POS,
                         N_("VD: Backend '%s' does not support async I/O"), pszBackend);
            break;
        }

        /*
         * Fail if the backend doesn't support the discard operation but the
         * flag is set.
         */
        if (   !(pImage->Backend->uBackendCaps & VD_CAP_DISCARD)
            && (uOpenFlags & VD_OPEN_FLAGS_DISCARD))
        {
            rc = vdError(pDisk, VERR_VD_DISCARD_NOT_SUPPORTED, RT_SRC_POS,
                         N_("VD: Backend '%s' does not support discard"), pszBackend);
            break;
        }

        /* Set up the I/O interface.  Fall back to the native file API if the
         * caller did not supply one. */
        pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
        if (!pImage->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
            pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface.  The caller must not have
         * installed one of its own. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
        AssertRC(rc);

        /* Remember only the flags the generic layer handles itself; the rest
         * is passed down to the backend open. */
        pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
        pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        rc = pImage->Backend->pfnOpen(pImage->pszFilename,
                                      uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
                                      pDisk->pVDIfsDisk,
                                      pImage->pVDIfsImage,
                                      pDisk->enmType,
                                      &pImage->pBackendData);
        /*
         * If the image is corrupted and there is a repair method try to repair it
         * first if it was opened in read-write mode and open again afterwards.
         */
        if (   RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED)
            && !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
            && pImage->Backend->pfnRepair)
        {
            rc = pImage->Backend->pfnRepair(pszFilename, pDisk->pVDIfsDisk, pImage->pVDIfsImage, 0 /* fFlags */);
            if (RT_SUCCESS(rc))
                rc = pImage->Backend->pfnOpen(pImage->pszFilename,
                                              uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS),
                                              pDisk->pVDIfsDisk,
                                              pImage->pVDIfsImage,
                                              pDisk->enmType,
                                              &pImage->pBackendData);
            else
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: error %Rrc repairing corrupted image file '%s'"), rc, pszFilename);
                break;
            }
        }
        else if (RT_UNLIKELY(rc == VERR_VD_IMAGE_CORRUPTED))
        {
            /* Corrupted, but either opened read-only or no repair callback. */
            rc = vdError(pDisk, rc, RT_SRC_POS,
                         N_("VD: Image file '%s' is corrupted and can't be opened"), pszFilename);
            break;
        }

        /* If the open in read-write mode failed, retry in read-only mode. */
        if (RT_FAILURE(rc))
        {
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
                &&  (   rc == VERR_ACCESS_DENIED
                     || rc == VERR_PERMISSION_DENIED
                     || rc == VERR_WRITE_PROTECT
                     || rc == VERR_SHARING_VIOLATION
                     || rc == VERR_FILE_LOCK_FAILED))
                rc = pImage->Backend->pfnOpen(pImage->pszFilename,
                                                (uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS))
                                               | VD_OPEN_FLAGS_READONLY,
                                               pDisk->pVDIfsDisk,
                                               pImage->pVDIfsImage,
                                               pDisk->enmType,
                                               &pImage->pBackendData);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
                break;
            }
        }

        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        pImage->VDIo.pBackendData = pImage->pBackendData;

        /* Check image type. As the image itself has only partial knowledge
         * whether it's a base image or not, this info is derived here. The
         * base image can be fixed or normal, all others must be normal or
         * diff images. Some image formats don't distinguish between normal
         * and diff images, so this must be corrected here. */
        unsigned uImageFlags;
        uImageFlags = pImage->Backend->pfnGetImageFlags(pImage->pBackendData);
        if (RT_FAILURE(rc))
            uImageFlags = VD_IMAGE_FLAGS_NONE;
        if (    RT_SUCCESS(rc)
            && !(uOpenFlags & VD_OPEN_FLAGS_INFO))
        {
            if (    pDisk->cImages == 0
                && (uImageFlags & VD_IMAGE_FLAGS_DIFF))
            {
                /* A diff image cannot be the base of the chain. */
                rc = VERR_VD_INVALID_TYPE;
                break;
            }
            else if (pDisk->cImages != 0)
            {
                if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
                {
                    /* Only the base image may be fixed. */
                    rc = VERR_VD_INVALID_TYPE;
                    break;
                }
                else
                    uImageFlags |= VD_IMAGE_FLAGS_DIFF;
            }
        }

        /* Ensure we always get correct diff information, even if the backend
         * doesn't actually have a stored flag for this. It must not return
         * bogus information for the parent UUID if it is not a diff image. */
        RTUUID parentUuid;
        RTUuidClear(&parentUuid);
        rc2 = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, &parentUuid);
        if (RT_SUCCESS(rc2) && !RTUuidIsNull(&parentUuid))
            uImageFlags |= VD_IMAGE_FLAGS_DIFF;

        pImage->uImageFlags = uImageFlags;

        /* Force sane optimization settings. It's not worth avoiding writes
         * to fixed size images. The overhead would have almost no payback. */
        if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
            pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;

        /** @todo optionally check UUIDs */

        /* Cache disk information. */
        pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);

        /* Cache PCHS geometry. */
        rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                  &pDisk->PCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            pDisk->PCHSGeometry.cCylinders = 0;
            pDisk->PCHSGeometry.cHeads = 0;
            pDisk->PCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the PCHS geometry is properly clipped. */
            pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
            pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
            pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
        }

        /* Cache LCHS geometry. */
        rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                  &pDisk->LCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            pDisk->LCHSGeometry.cCylinders = 0;
            pDisk->LCHSGeometry.cHeads = 0;
            pDisk->LCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the LCHS geometry is properly clipped. */
            pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
            pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
        }

        if (pDisk->cImages != 0)
        {
            /* Switch previous image to read-only mode. */
            unsigned uOpenFlagsPrevImg;
            uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
            if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
            {
                uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
                rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Image successfully opened, make it the last image. */
            vdAddImageToList(pDisk, pImage);
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
        }
        else
        {
            /* Error detected, but image opened. Close image. */
            rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
            AssertRC(rc2);
            pImage->pBackendData = NULL;
        }
    } while (0);

    /* Release the disk write lock if it is still held (early breaks). */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the image descriptor and its filename copy. */
    if (RT_FAILURE(rc))
    {
        if (pImage)
        {
            if (pImage->pszFilename)
                RTStrFree(pImage->pszFilename);
            RTMemFree(pImage);
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6681
/**
 * Opens a cache image.
 *
 * @return VBox status code.
 * @param pDisk Pointer to the HDD container which should use the cache image.
 * @param pszBackend Name of the cache file backend to use (case insensitive).
 * @param pszFilename Name of the cache image to open.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsCache Pointer to the per-cache VD interface list.
 */
VBOXDDU_DECL(int) VDCacheOpen(PVBOXHDD pDisk, const char *pszBackend,
                              const char *pszFilename, unsigned uOpenFlags,
                              PVDINTERFACE pVDIfsCache)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;  /* Tracks whether we currently hold the disk write lock. */
    PVDCACHE pCache = NULL;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uOpenFlags=%#x, pVDIfsCache=%#p\n",
                 pDisk, pszBackend, pszFilename, uOpenFlags, pVDIfsCache));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Set up image descriptor. */
        pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
        if (!pCache)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pCache->pszFilename = RTStrDup(pszFilename);
        if (!pCache->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        pCache->VDIo.pDisk  = pDisk;
        pCache->pVDIfsCache = pVDIfsCache;

        /* Resolve the cache backend by name. */
        rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pCache->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }

        /* Set up the I/O interface.  Fall back to the native file API if the
         * caller did not supply one. */
        pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
        if (!pCache->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
            pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface.  The caller must not have
         * installed one of its own. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
        AssertRC(rc);

        /* HONOR_SAME is handled by the generic layer; everything else goes to the backend. */
        pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        rc = pCache->Backend->pfnOpen(pCache->pszFilename,
                                      uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                      pDisk->pVDIfsDisk,
                                      pCache->pVDIfsCache,
                                      &pCache->pBackendData);
        /* If the open in read-write mode failed, retry in read-only mode. */
        if (RT_FAILURE(rc))
        {
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY)
                &&  (   rc == VERR_ACCESS_DENIED
                     || rc == VERR_PERMISSION_DENIED
                     || rc == VERR_WRITE_PROTECT
                     || rc == VERR_SHARING_VIOLATION
                     || rc == VERR_FILE_LOCK_FAILED))
                rc = pCache->Backend->pfnOpen(pCache->pszFilename,
                                                (uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME)
                                               | VD_OPEN_FLAGS_READONLY,
                                               pDisk->pVDIfsDisk,
                                               pCache->pVDIfsCache,
                                               &pCache->pBackendData);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: error %Rrc opening image file '%s'"), rc, pszFilename);
                break;
            }
        }

        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /*
         * Check that the modification UUID of the cache and last image
         * match. If not the image was modified in-between without the cache.
         * The cache might contain stale data.
         */
        RTUUID UuidImage, UuidCache;

        rc = pCache->Backend->pfnGetModificationUuid(pCache->pBackendData,
                                                     &UuidCache);
        if (RT_SUCCESS(rc))
        {
            rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
                                                               &UuidImage);
            if (RT_SUCCESS(rc))
            {
                if (RTUuidCompare(&UuidImage, &UuidCache))
                    rc = VERR_VD_CACHE_NOT_UP_TO_DATE;
            }
        }

        /*
         * We assume that the user knows what he is doing if one of the images
         * doesn't support the modification uuid.
         */
        if (rc == VERR_NOT_SUPPORTED)
            rc = VINF_SUCCESS;

        if (RT_SUCCESS(rc))
        {
            /* Cache successfully opened, make it the current one. */
            if (!pDisk->pCache)
                pDisk->pCache = pCache;
            else
                rc = VERR_VD_CACHE_ALREADY_EXISTS;
        }

        if (RT_FAILURE(rc))
        {
            /* Error detected, but image opened. Close image. */
            rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
            AssertRC(rc2);
            pCache->pBackendData = NULL;
        }
    } while (0);

    /* Release the disk write lock if it is still held (early breaks). */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the cache descriptor and its filename copy. */
    if (RT_FAILURE(rc))
    {
        if (pCache)
        {
            if (pCache->pszFilename)
                RTStrFree(pCache->pszFilename);
            RTMemFree(pCache);
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6863
/**
 * Adds a filter to the disk's read and/or write filter chains.
 *
 * @returns VBox status code.
 * @param pDisk        Pointer to the HDD container.
 * @param pszFilter    Name of the filter backend to use (case insensitive).
 * @param fFlags       Combination of VD_FILTER_FLAGS_* selecting the chains
 *                     (read and/or write) the filter is appended to.
 * @param pVDIfsFilter Pointer to the per-filter VD interface list.
 */
VBOXDDU_DECL(int) VDFilterAdd(PVBOXHDD pDisk, const char *pszFilter, uint32_t fFlags,
                              PVDINTERFACE pVDIfsFilter)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;  /* Tracks whether we currently hold the disk write lock. */
    PVDFILTER pFilter = NULL;

    LogFlowFunc(("pDisk=%#p pszFilter=\"%s\" pVDIfsFilter=%#p\n",
                 pDisk, pszFilter, pVDIfsFilter));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszFilter) && *pszFilter,
                           ("pszFilter=%#p \"%s\"\n", pszFilter, pszFilter),
                           rc = VERR_INVALID_PARAMETER);

        AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
                           ("Invalid flags set (fFlags=%#x)\n", fFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Set up image descriptor. */
        pFilter = (PVDFILTER)RTMemAllocZ(sizeof(VDFILTER));
        if (!pFilter)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /* Resolve the filter backend by name. */
        rc = vdFindFilterBackend(pszFilter, &pFilter->pBackend);
        if (RT_FAILURE(rc))
            break;
        if (!pFilter->pBackend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown filter backend name '%s'"), pszFilter);
            break;
        }

        pFilter->VDIo.pDisk   = pDisk;
        pFilter->pVDIfsFilter = pVDIfsFilter;

        /* Set up the internal I/O interface.  The caller must not have
         * installed one of its own. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsFilter), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pFilter->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pFilter->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pFilter->VDIo, sizeof(VDINTERFACEIOINT), &pFilter->pVDIfsFilter);
        AssertRC(rc);

        /* Instantiate the filter backend. */
        rc = pFilter->pBackend->pfnCreate(pDisk->pVDIfsDisk, fFlags & VD_FILTER_FLAGS_INFO,
                                          pFilter->pVDIfsFilter, &pFilter->pvBackendData);
        if (RT_FAILURE(rc))
            break;

        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Add filter to chains.  One reference is taken per chain the filter
         * is a member of. */
        if (fFlags & VD_FILTER_FLAGS_WRITE)
        {
            RTListAppend(&pDisk->ListFilterChainWrite, &pFilter->ListNodeChainWrite);
            vdFilterRetain(pFilter);
        }

        if (fFlags & VD_FILTER_FLAGS_READ)
        {
            RTListAppend(&pDisk->ListFilterChainRead, &pFilter->ListNodeChainRead);
            vdFilterRetain(pFilter);
        }
    } while (0);

    /* Release the disk write lock if it was taken. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the filter descriptor. */
    if (RT_FAILURE(rc))
    {
        if (pFilter)
            RTMemFree(pFilter);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6957
/**
 * Creates and opens a new base image file.
 *
 * @returns VBox status code.
 * @returns VERR_VD_INVALID_STATE if another image is already open in the container.
 * @returns VERR_VD_INVALID_SIZE if cbSize is not a multiple of 512 bytes.
 * @param pDisk Pointer to HDD container.
 * @param pszBackend Name of the image file backend to use.
 * @param pszFilename Name of the image file to create.
 * @param cbSize Image size in bytes. Must be a multiple of the 512 byte sector size.
 * @param uImageFlags Flags specifying special image features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pPCHSGeometry Pointer to physical disk geometry <= (16383,16,63). Not NULL.
 * @param pLCHSGeometry Pointer to logical disk geometry <= (x,255,63). Not NULL.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsImage Pointer to the per-image VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateBase(PVBOXHDD pDisk, const char *pszBackend,
                               const char *pszFilename, uint64_t cbSize,
                               unsigned uImageFlags, const char *pszComment,
                               PCVDGEOMETRY pPCHSGeometry,
                               PCVDGEOMETRY pLCHSGeometry,
                               PCRTUUID pUuid, unsigned uOpenFlags,
                               PVDINTERFACE pVDIfsImage,
                               PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    /* Track which disk lock (if any) is currently held so the epilogue below
     * can release exactly the one taken, even when we break out early. */
    bool fLockWrite = false, fLockRead = false;
    PVDIMAGE pImage = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" PCHS=%u/%u/%u LCHS=%u/%u/%u Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment,
                 pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                 pPCHSGeometry->cSectors, pLCHSGeometry->cCylinders,
                 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors, pUuid,
                 uOpenFlags, pVDIfsImage, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* Single-iteration loop: "break" jumps to the common cleanup code below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbSize,
                           ("cbSize=%llu\n", cbSize),
                           rc = VERR_INVALID_PARAMETER);
        /* The size must be aligned to a 512 byte sector boundary. */
        AssertMsgBreakStmt(!(cbSize % 512),
                           ("cbSize=%llu\n", cbSize),
                           rc = VERR_VD_INVALID_SIZE);
        AssertMsgBreakStmt(   ((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0)
                           || ((uImageFlags & (VD_IMAGE_FLAGS_FIXED | VD_IMAGE_FLAGS_DIFF)) != VD_IMAGE_FLAGS_FIXED),
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The PCHS geometry fields may be 0 to leave it for later. */
        AssertMsgBreakStmt(   VALID_PTR(pPCHSGeometry)
                           && pPCHSGeometry->cHeads <= 16
                           && pPCHSGeometry->cSectors <= 63,
                           ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
                            pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                            pPCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);
        /* The LCHS geometry fields may be 0 to leave it to later autodetection. */
        AssertMsgBreakStmt(   VALID_PTR(pLCHSGeometry)
                           && pLCHSGeometry->cHeads <= 255
                           && pLCHSGeometry->cSectors <= 63,
                           ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
                            pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
                            pLCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(pDisk->cImages == 0,
                           ("Create base image cannot be done with other images open\n"),
                           rc = VERR_VD_INVALID_STATE);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /* Set up image descriptor. */
        pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
        if (!pImage)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->pszFilename = RTStrDup(pszFilename);
        if (!pImage->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->VDIo.pDisk = pDisk;
        pImage->pVDIfsImage = pVDIfsImage;

        /* Set up the I/O interface. Fall back to the built-in callbacks if the
         * caller didn't supply a VD_IO interface in the per-image list. */
        pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
        if (!pImage->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
            pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
        AssertRC(rc);

        rc = vdFindBackend(pszBackend, &pImage->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pImage->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }
        if (!(pImage->Backend->uBackendCaps & (  VD_CAP_CREATE_FIXED
                                               | VD_CAP_CREATE_DYNAMIC)))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' cannot create base images"), pszBackend);
            break;
        }
        /* Split-2G and stream-optimized variants are only valid for backends
         * which support them (stream-optimized is VMDK-only). */
        if (   (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
                && !(pImage->Backend->uBackendCaps & VD_CAP_CREATE_SPLIT_2G))
            || (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                && RTStrICmp(pszBackend, "VMDK")))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' does not support the selected image variant"), pszBackend);
            break;
        }

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        /* A base image is never a diff image, regardless of caller flags. */
        uImageFlags &= ~VD_IMAGE_FLAGS_DIFF;
        pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        rc = pImage->Backend->pfnCreate(pImage->pszFilename, cbSize,
                                        uImageFlags, pszComment, pPCHSGeometry,
                                        pLCHSGeometry, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pImage->pVDIfsImage,
                                        pVDIfsOperation,
                                        pDisk->enmType,
                                        &pImage->pBackendData);

        if (RT_SUCCESS(rc))
        {
            pImage->VDIo.pBackendData = pImage->pBackendData;
            pImage->uImageFlags = uImageFlags;

            /* Force sane optimization settings. It's not worth avoiding writes
             * to fixed size images. The overhead would have almost no payback. */
            if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
                pImage->uOpenFlags |= VD_OPEN_FLAGS_HONOR_SAME;

            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            /** @todo optionally check UUIDs */

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(pDisk->cImages == 0,
                          ("Create base image cannot be done with other images open\n"),
                          rc = VERR_VD_INVALID_STATE);
        }

        if (RT_SUCCESS(rc))
        {
            /* Cache disk information. */
            pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);

            /* Cache PCHS geometry; a failure just means it is not set yet. */
            rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                      &pDisk->PCHSGeometry);
            if (RT_FAILURE(rc2))
            {
                pDisk->PCHSGeometry.cCylinders = 0;
                pDisk->PCHSGeometry.cHeads = 0;
                pDisk->PCHSGeometry.cSectors = 0;
            }
            else
            {
                /* Make sure the CHS geometry is properly clipped. */
                pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
                pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
                pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
            }

            /* Cache LCHS geometry; a failure just means it is not set yet. */
            rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                      &pDisk->LCHSGeometry);
            if (RT_FAILURE(rc2))
            {
                pDisk->LCHSGeometry.cCylinders = 0;
                pDisk->LCHSGeometry.cHeads = 0;
                pDisk->LCHSGeometry.cSectors = 0;
            }
            else
            {
                /* Make sure the CHS geometry is properly clipped. */
                pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
                pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
            }

            /* Image successfully opened, make it the last image. */
            vdAddImageToList(pDisk, pImage);
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
        }
        else
        {
            /* Error detected, image may or may not be opened. Close and delete
             * image if it was opened. */
            if (pImage->pBackendData)
            {
                rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
                AssertRC(rc2);
                pImage->pBackendData = NULL;
            }
        }
    } while (0);

    /* Release whichever disk lock is still held. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the descriptor; the backend data was already closed above. */
    if (RT_FAILURE(rc))
    {
        if (pImage)
        {
            if (pImage->pszFilename)
                RTStrFree(pImage->pszFilename);
            RTMemFree(pImage);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7252
/**
 * Creates and opens a new differencing image file in HDD container.
 * See comments for VDOpen function about differencing images.
 *
 * The new image inherits its size and geometry from the container and is
 * linked to the current last image as its parent.
 *
 * @returns VBox status code.
 * @returns VERR_VD_INVALID_STATE if no other image is open in the container.
 * @param pDisk Pointer to HDD container.
 * @param pszBackend Name of the image file backend to use.
 * @param pszFilename Name of the differencing image file to create.
 * @param uImageFlags Flags specifying special image features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param pParentUuid New parent UUID of the image. If NULL, the UUID is queried automatically.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsImage Pointer to the per-image VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateDiff(PVBOXHDD pDisk, const char *pszBackend,
                               const char *pszFilename, unsigned uImageFlags,
                               const char *pszComment, PCRTUUID pUuid,
                               PCRTUUID pParentUuid, unsigned uOpenFlags,
                               PVDINTERFACE pVDIfsImage,
                               PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    /* Track which disk lock (if any) is currently held so the epilogue below
     * can release exactly the one taken, even when we break out early. */
    bool fLockWrite = false, fLockRead = false;
    PVDIMAGE pImage = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsImage, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* Single-iteration loop: "break" jumps to the common cleanup code below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        /* The parent UUID may be NULL. */
        AssertMsgBreakStmt(pParentUuid == NULL || VALID_PTR(pParentUuid),
                           ("pParentUuid=%#p ParentUUID=%RTuuid\n", pParentUuid, pParentUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(pDisk->cImages != 0,
                           ("Create diff image cannot be done without other images open\n"),
                           rc = VERR_VD_INVALID_STATE);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /*
         * Destroy the current discard state first which might still have pending blocks
         * for the currently opened image which will be switched to readonly mode.
         */
        /* Lock disk for writing, as we modify pDisk information below. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = false;

        /* Set up image descriptor. */
        pImage = (PVDIMAGE)RTMemAllocZ(sizeof(VDIMAGE));
        if (!pImage)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pImage->pszFilename = RTStrDup(pszFilename);
        if (!pImage->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdFindBackend(pszBackend, &pImage->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pImage->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }
        /* The backend must support diff images and be able to create images. */
        if (   !(pImage->Backend->uBackendCaps & VD_CAP_DIFF)
            || !(pImage->Backend->uBackendCaps & (  VD_CAP_CREATE_FIXED
                                                  | VD_CAP_CREATE_DYNAMIC)))
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: backend '%s' cannot create diff images"), pszBackend);
            break;
        }

        pImage->VDIo.pDisk = pDisk;
        pImage->pVDIfsImage = pVDIfsImage;

        /* Set up the I/O interface. Fall back to the built-in callbacks if the
         * caller didn't supply a VD_IO interface in the per-image list. */
        pImage->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsImage);
        if (!pImage->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pImage->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pImage->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsImage);
            pImage->VDIo.pInterfaceIo = &pImage->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsImage), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pImage->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pImage->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pImage->VDIo, sizeof(VDINTERFACEIOINT), &pImage->pVDIfsImage);
        AssertRC(rc);

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pImage->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        pImage->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        /* A diff image is always marked as such; size and geometry are taken
         * from the container (i.e. the existing image chain). */
        uImageFlags |= VD_IMAGE_FLAGS_DIFF;
        rc = pImage->Backend->pfnCreate(pImage->pszFilename, pDisk->cbSize,
                                        uImageFlags | VD_IMAGE_FLAGS_DIFF,
                                        pszComment, &pDisk->PCHSGeometry,
                                        &pDisk->LCHSGeometry, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pImage->pVDIfsImage,
                                        pVDIfsOperation,
                                        pDisk->enmType,
                                        &pImage->pBackendData);

        if (RT_SUCCESS(rc))
        {
            pImage->VDIo.pBackendData = pImage->pBackendData;
            pImage->uImageFlags = uImageFlags;

            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            /* Switch previous image to read-only mode. */
            unsigned uOpenFlagsPrevImg;
            uOpenFlagsPrevImg = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
            if (!(uOpenFlagsPrevImg & VD_OPEN_FLAGS_READONLY))
            {
                uOpenFlagsPrevImg |= VD_OPEN_FLAGS_READONLY;
                rc = pDisk->pLast->Backend->pfnSetOpenFlags(pDisk->pLast->pBackendData, uOpenFlagsPrevImg);
            }

            /** @todo optionally check UUIDs */

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(pDisk->cImages != 0,
                          ("Create diff image cannot be done without other images open\n"),
                          rc = VERR_VD_INVALID_STATE);
        }

        if (RT_SUCCESS(rc))
        {
            RTUUID Uuid;
            RTTIMESPEC ts;

            /* Link the new image to its parent: explicit parent UUID if the
             * caller gave one, otherwise inherit from the current last image. */
            if (pParentUuid && !RTUuidIsNull(pParentUuid))
            {
                Uuid = *pParentUuid;
                pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
            }
            else
            {
                rc2 = pDisk->pLast->Backend->pfnGetUuid(pDisk->pLast->pBackendData,
                                                        &Uuid);
                if (RT_SUCCESS(rc2))
                    pImage->Backend->pfnSetParentUuid(pImage->pBackendData, &Uuid);
            }
            rc2 = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
                                                                &Uuid);
            if (RT_SUCCESS(rc2))
                pImage->Backend->pfnSetParentModificationUuid(pImage->pBackendData,
                                                              &Uuid);
            /* Timestamp and parent filename propagation are optional backend
             * capabilities; skip silently when the callbacks are not provided. */
            if (pDisk->pLast->Backend->pfnGetTimestamp)
                rc2 = pDisk->pLast->Backend->pfnGetTimestamp(pDisk->pLast->pBackendData,
                                                             &ts);
            else
                rc2 = VERR_NOT_IMPLEMENTED;
            if (RT_SUCCESS(rc2) && pImage->Backend->pfnSetParentTimestamp)
                pImage->Backend->pfnSetParentTimestamp(pImage->pBackendData, &ts);

            if (pImage->Backend->pfnSetParentFilename)
                rc2 = pImage->Backend->pfnSetParentFilename(pImage->pBackendData, pDisk->pLast->pszFilename);
        }

        if (RT_SUCCESS(rc))
        {
            /* Image successfully opened, make it the last image. */
            vdAddImageToList(pDisk, pImage);
            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                pDisk->uModified = VD_IMAGE_MODIFIED_FIRST;
        }
        else
        {
            /* Error detected, but image opened. Close and delete image. */
            rc2 = pImage->Backend->pfnClose(pImage->pBackendData, true);
            AssertRC(rc2);
            pImage->pBackendData = NULL;
        }
    } while (0);

    /* Release whichever disk lock is still held. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the descriptor; the backend data was already closed above. */
    if (RT_FAILURE(rc))
    {
        if (pImage)
        {
            if (pImage->pszFilename)
                RTStrFree(pImage->pszFilename);
            RTMemFree(pImage);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7528
7529
/**
 * Creates and opens new cache image file in HDD container.
 *
 * @return VBox status code.
 * @return VERR_VD_CACHE_ALREADY_EXISTS if a cache is already attached.
 * @param pDisk Pointer to the HDD container.
 * @param pszBackend Name of the cache file backend to use (case insensitive).
 * @param pszFilename Name of the cache file to create.
 * @param cbSize Maximum size of the cache.
 * @param uImageFlags Flags specifying special cache features.
 * @param pszComment Pointer to image comment. NULL is ok.
 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
 * @param pVDIfsCache Pointer to the per-cache VD interface list.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
VBOXDDU_DECL(int) VDCreateCache(PVBOXHDD pDisk, const char *pszBackend,
                                const char *pszFilename, uint64_t cbSize,
                                unsigned uImageFlags, const char *pszComment,
                                PCRTUUID pUuid, unsigned uOpenFlags,
                                PVDINTERFACE pVDIfsCache, PVDINTERFACE pVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    /* Track which disk lock (if any) is currently held so the epilogue below
     * can release exactly the one taken, even when we break out early. */
    bool fLockWrite = false, fLockRead = false;
    PVDCACHE pCache = NULL;
    RTUUID uuid;

    LogFlowFunc(("pDisk=%#p pszBackend=\"%s\" pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x pVDIfsImage=%#p pVDIfsOperation=%#p\n",
                 pDisk, pszBackend, pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, pVDIfsCache, pVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

    /* Single-iteration loop: "break" jumps to the common cleanup code below. */
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pszBackend) && *pszBackend,
                           ("pszBackend=%#p \"%s\"\n", pszBackend, pszBackend),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbSize,
                           ("cbSize=%llu\n", cbSize),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uImageFlags & ~VD_IMAGE_FLAGS_MASK) == 0,
                           ("uImageFlags=%#x\n", uImageFlags),
                           rc = VERR_INVALID_PARAMETER);
        /* The UUID may be NULL. */
        AssertMsgBreakStmt(pUuid == NULL || VALID_PTR(pUuid),
                           ("pUuid=%#p UUID=%RTuuid\n", pUuid, pUuid),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
                           ("uOpenFlags=%#x\n", uOpenFlags),
                           rc = VERR_INVALID_PARAMETER);

        /* Check state. Needs a temporary read lock. Holding the write lock
         * all the time would be blocking other activities for too long. */
        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;
        AssertMsgBreakStmt(!pDisk->pCache,
                           ("Create cache image cannot be done with a cache already attached\n"),
                           rc = VERR_VD_CACHE_ALREADY_EXISTS);
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
        fLockRead = false;

        /* Set up image descriptor. */
        pCache = (PVDCACHE)RTMemAllocZ(sizeof(VDCACHE));
        if (!pCache)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        pCache->pszFilename = RTStrDup(pszFilename);
        if (!pCache->pszFilename)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdFindCacheBackend(pszBackend, &pCache->Backend);
        if (RT_FAILURE(rc))
            break;
        if (!pCache->Backend)
        {
            rc = vdError(pDisk, VERR_INVALID_PARAMETER, RT_SRC_POS,
                         N_("VD: unknown backend name '%s'"), pszBackend);
            break;
        }

        pCache->VDIo.pDisk = pDisk;
        pCache->pVDIfsCache = pVDIfsCache;

        /* Set up the I/O interface. Fall back to the built-in callbacks if the
         * caller didn't supply a VD_IO interface in the per-cache list. */
        pCache->VDIo.pInterfaceIo = VDIfIoGet(pVDIfsCache);
        if (!pCache->VDIo.pInterfaceIo)
        {
            vdIfIoFallbackCallbacksSetup(&pCache->VDIo.VDIfIo);
            rc = VDInterfaceAdd(&pCache->VDIo.VDIfIo.Core, "VD_IO", VDINTERFACETYPE_IO,
                                pDisk, sizeof(VDINTERFACEIO), &pVDIfsCache);
            pCache->VDIo.pInterfaceIo = &pCache->VDIo.VDIfIo;
        }

        /* Set up the internal I/O interface. The caller must not supply one. */
        AssertBreakStmt(!VDIfIoIntGet(pVDIfsCache), rc = VERR_INVALID_PARAMETER);
        vdIfIoIntCallbacksSetup(&pCache->VDIo.VDIfIoInt);
        rc = VDInterfaceAdd(&pCache->VDIo.VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                            &pCache->VDIo, sizeof(VDINTERFACEIOINT), &pCache->pVDIfsCache);
        AssertRC(rc);

        /* Create UUID if the caller didn't specify one. */
        if (!pUuid)
        {
            rc = RTUuidCreate(&uuid);
            if (RT_FAILURE(rc))
            {
                rc = vdError(pDisk, rc, RT_SRC_POS,
                             N_("VD: cannot generate UUID for image '%s'"),
                             pszFilename);
                break;
            }
            pUuid = &uuid;
        }

        pCache->uOpenFlags = uOpenFlags & VD_OPEN_FLAGS_HONOR_SAME;
        pCache->VDIo.fIgnoreFlush = (uOpenFlags & VD_OPEN_FLAGS_IGNORE_FLUSH) != 0;
        rc = pCache->Backend->pfnCreate(pCache->pszFilename, cbSize,
                                        uImageFlags,
                                        pszComment, pUuid,
                                        uOpenFlags & ~VD_OPEN_FLAGS_HONOR_SAME,
                                        0, 99,
                                        pDisk->pVDIfsDisk,
                                        pCache->pVDIfsCache,
                                        pVDIfsOperation,
                                        &pCache->pBackendData);

        if (RT_SUCCESS(rc))
        {
            /* Lock disk for writing, as we modify pDisk information below. */
            rc2 = vdThreadStartWrite(pDisk);
            AssertRC(rc2);
            fLockWrite = true;

            pCache->VDIo.pBackendData = pCache->pBackendData;

            /* Re-check state, as the lock wasn't held and another image
             * creation call could have been done by another thread. */
            AssertMsgStmt(!pDisk->pCache,
                          ("Create cache image cannot be done with another cache open\n"),
                          rc = VERR_VD_CACHE_ALREADY_EXISTS);
        }

        if (   RT_SUCCESS(rc)
            && pDisk->pLast)
        {
            RTUUID UuidModification;

            /* Set same modification Uuid as the last image. */
            rc = pDisk->pLast->Backend->pfnGetModificationUuid(pDisk->pLast->pBackendData,
                                                               &UuidModification);
            if (RT_SUCCESS(rc))
            {
                rc = pCache->Backend->pfnSetModificationUuid(pCache->pBackendData,
                                                             &UuidModification);
            }

            /* Modification UUIDs are an optional capability; not fatal. */
            if (rc == VERR_NOT_SUPPORTED)
                rc = VINF_SUCCESS;
        }

        if (RT_SUCCESS(rc))
        {
            /* Cache successfully created. */
            pDisk->pCache = pCache;
        }
        else
        {
            /* Error detected, but image opened. Close and delete image. */
            rc2 = pCache->Backend->pfnClose(pCache->pBackendData, true);
            AssertRC(rc2);
            pCache->pBackendData = NULL;
        }
    } while (0);

    /* Release whichever disk lock is still held. */
    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* On failure free the descriptor; the backend data was already closed above. */
    if (RT_FAILURE(rc))
    {
        if (pCache)
        {
            if (pCache->pszFilename)
                RTStrFree(pCache->pszFilename);
            RTMemFree(pCache);
        }
    }

    if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
        pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7745
/**
 * Merges two images (not necessarily with direct parent/child relationship).
 * As a side effect the source image and potentially the other images which
 * are also merged to the destination are deleted from both the disk and the
 * images in the HDD container.
 *
 * @returns VBox status code.
 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
 * @param pDisk Pointer to HDD container.
 * @param nImageFrom Number of the image to merge from.
 * @param nImageTo Number of the image to merge to.
 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
 */
7759VBOXDDU_DECL(int) VDMerge(PVBOXHDD pDisk, unsigned nImageFrom,
7760 unsigned nImageTo, PVDINTERFACE pVDIfsOperation)
7761{
7762 int rc = VINF_SUCCESS;
7763 int rc2;
7764 bool fLockWrite = false, fLockRead = false;
7765 void *pvBuf = NULL;
7766
7767 LogFlowFunc(("pDisk=%#p nImageFrom=%u nImageTo=%u pVDIfsOperation=%#p\n",
7768 pDisk, nImageFrom, nImageTo, pVDIfsOperation));
7769
7770 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7771
7772 do
7773 {
7774 /* sanity check */
7775 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
7776 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
7777
7778 /* For simplicity reasons lock for writing as the image reopen below
7779 * might need it. After all the reopen is usually needed. */
7780 rc2 = vdThreadStartWrite(pDisk);
7781 AssertRC(rc2);
7782 fLockWrite = true;
7783 PVDIMAGE pImageFrom = vdGetImageByNumber(pDisk, nImageFrom);
7784 PVDIMAGE pImageTo = vdGetImageByNumber(pDisk, nImageTo);
7785 if (!pImageFrom || !pImageTo)
7786 {
7787 rc = VERR_VD_IMAGE_NOT_FOUND;
7788 break;
7789 }
7790 AssertBreakStmt(pImageFrom != pImageTo, rc = VERR_INVALID_PARAMETER);
7791
7792 /* Make sure destination image is writable. */
7793 unsigned uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
7794 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7795 {
7796 /*
7797 * Clear skip consistency checks because the image is made writable now and
7798 * skipping consistency checks is only possible for readonly images.
7799 */
7800 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
7801 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
7802 uOpenFlags);
7803 if (RT_FAILURE(rc))
7804 break;
7805 }
7806
7807 /* Get size of destination image. */
7808 uint64_t cbSize = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
7809 rc2 = vdThreadFinishWrite(pDisk);
7810 AssertRC(rc2);
7811 fLockWrite = false;
7812
7813 /* Allocate tmp buffer. */
7814 pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
7815 if (!pvBuf)
7816 {
7817 rc = VERR_NO_MEMORY;
7818 break;
7819 }
7820
7821 /* Merging is done directly on the images itself. This potentially
7822 * causes trouble if the disk is full in the middle of operation. */
7823 if (nImageFrom < nImageTo)
7824 {
7825 /* Merge parent state into child. This means writing all not
7826 * allocated blocks in the destination image which are allocated in
7827 * the images to be merged. */
7828 uint64_t uOffset = 0;
7829 uint64_t cbRemaining = cbSize;
7830 do
7831 {
7832 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7833 RTSGSEG SegmentBuf;
7834 RTSGBUF SgBuf;
7835 VDIOCTX IoCtx;
7836
7837 SegmentBuf.pvSeg = pvBuf;
7838 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7839 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7840 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7841 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7842
7843 /* Need to hold the write lock during a read-write operation. */
7844 rc2 = vdThreadStartWrite(pDisk);
7845 AssertRC(rc2);
7846 fLockWrite = true;
7847
7848 rc = pImageTo->Backend->pfnRead(pImageTo->pBackendData,
7849 uOffset, cbThisRead,
7850 &IoCtx, &cbThisRead);
7851 if (rc == VERR_VD_BLOCK_FREE)
7852 {
7853 /* Search for image with allocated block. Do not attempt to
7854 * read more than the previous reads marked as valid.
7855 * Otherwise this would return stale data when different
7856 * block sizes are used for the images. */
7857 for (PVDIMAGE pCurrImage = pImageTo->pPrev;
7858 pCurrImage != NULL && pCurrImage != pImageFrom->pPrev && rc == VERR_VD_BLOCK_FREE;
7859 pCurrImage = pCurrImage->pPrev)
7860 {
7861 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7862 uOffset, cbThisRead,
7863 &IoCtx, &cbThisRead);
7864 }
7865
7866 if (rc != VERR_VD_BLOCK_FREE)
7867 {
7868 if (RT_FAILURE(rc))
7869 break;
7870 /* Updating the cache is required because this might be a live merge. */
7871 rc = vdWriteHelperEx(pDisk, pImageTo, pImageFrom->pPrev,
7872 uOffset, pvBuf, cbThisRead,
7873 VDIOCTX_FLAGS_READ_UPDATE_CACHE, 0);
7874 if (RT_FAILURE(rc))
7875 break;
7876 }
7877 else
7878 rc = VINF_SUCCESS;
7879 }
7880 else if (RT_FAILURE(rc))
7881 break;
7882
7883 rc2 = vdThreadFinishWrite(pDisk);
7884 AssertRC(rc2);
7885 fLockWrite = false;
7886
7887 uOffset += cbThisRead;
7888 cbRemaining -= cbThisRead;
7889
7890 if (pIfProgress && pIfProgress->pfnProgress)
7891 {
7892 /** @todo r=klaus: this can update the progress to the same
7893 * percentage over and over again if the image format makes
7894 * relatively small increments. */
7895 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
7896 uOffset * 99 / cbSize);
7897 if (RT_FAILURE(rc))
7898 break;
7899 }
7900 } while (uOffset < cbSize);
7901 }
7902 else
7903 {
7904 /*
7905 * We may need to update the parent uuid of the child coming after
7906 * the last image to be merged. We have to reopen it read/write.
7907 *
7908 * This is done before we do the actual merge to prevent an
7909 * inconsistent chain if the mode change fails for some reason.
7910 */
7911 if (pImageFrom->pNext)
7912 {
7913 PVDIMAGE pImageChild = pImageFrom->pNext;
7914
7915 /* Take the write lock. */
7916 rc2 = vdThreadStartWrite(pDisk);
7917 AssertRC(rc2);
7918 fLockWrite = true;
7919
7920 /* We need to open the image in read/write mode. */
7921 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
7922
7923 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7924 {
7925 uOpenFlags &= ~VD_OPEN_FLAGS_READONLY;
7926 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
7927 uOpenFlags);
7928 if (RT_FAILURE(rc))
7929 break;
7930 }
7931
7932 rc2 = vdThreadFinishWrite(pDisk);
7933 AssertRC(rc2);
7934 fLockWrite = false;
7935 }
7936
7937 /* If the merge is from the last image we have to relay all writes
7938 * to the merge destination as well, so that concurrent writes
7939 * (in case of a live merge) are handled correctly. */
7940 if (!pImageFrom->pNext)
7941 {
7942 /* Take the write lock. */
7943 rc2 = vdThreadStartWrite(pDisk);
7944 AssertRC(rc2);
7945 fLockWrite = true;
7946
7947 pDisk->pImageRelay = pImageTo;
7948
7949 rc2 = vdThreadFinishWrite(pDisk);
7950 AssertRC(rc2);
7951 fLockWrite = false;
7952 }
7953
7954 /* Merge child state into parent. This means writing all blocks
7955 * which are allocated in the image up to the source image to the
7956 * destination image. */
7957 uint64_t uOffset = 0;
7958 uint64_t cbRemaining = cbSize;
7959 do
7960 {
7961 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
7962 RTSGSEG SegmentBuf;
7963 RTSGBUF SgBuf;
7964 VDIOCTX IoCtx;
7965
7966 rc = VERR_VD_BLOCK_FREE;
7967
7968 SegmentBuf.pvSeg = pvBuf;
7969 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
7970 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
7971 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
7972 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
7973
7974 /* Need to hold the write lock during a read-write operation. */
7975 rc2 = vdThreadStartWrite(pDisk);
7976 AssertRC(rc2);
7977 fLockWrite = true;
7978
7979 /* Search for image with allocated block. Do not attempt to
7980 * read more than the previous reads marked as valid. Otherwise
7981 * this would return stale data when different block sizes are
7982 * used for the images. */
7983 for (PVDIMAGE pCurrImage = pImageFrom;
7984 pCurrImage != NULL && pCurrImage != pImageTo && rc == VERR_VD_BLOCK_FREE;
7985 pCurrImage = pCurrImage->pPrev)
7986 {
7987 rc = pCurrImage->Backend->pfnRead(pCurrImage->pBackendData,
7988 uOffset, cbThisRead,
7989 &IoCtx, &cbThisRead);
7990 }
7991
7992 if (rc != VERR_VD_BLOCK_FREE)
7993 {
7994 if (RT_FAILURE(rc))
7995 break;
7996 rc = vdWriteHelper(pDisk, pImageTo, uOffset, pvBuf,
7997 cbThisRead, VDIOCTX_FLAGS_READ_UPDATE_CACHE);
7998 if (RT_FAILURE(rc))
7999 break;
8000 }
8001 else
8002 rc = VINF_SUCCESS;
8003
8004 rc2 = vdThreadFinishWrite(pDisk);
8005 AssertRC(rc2);
8006 fLockWrite = false;
8007
8008 uOffset += cbThisRead;
8009 cbRemaining -= cbThisRead;
8010
8011 if (pIfProgress && pIfProgress->pfnProgress)
8012 {
8013 /** @todo r=klaus: this can update the progress to the same
8014 * percentage over and over again if the image format makes
8015 * relatively small increments. */
8016 rc = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8017 uOffset * 99 / cbSize);
8018 if (RT_FAILURE(rc))
8019 break;
8020 }
8021 } while (uOffset < cbSize);
8022
8023 /* In case we set up a "write proxy" image above we must clear
8024 * this again now to prevent stray writes. Failure or not. */
8025 if (!pImageFrom->pNext)
8026 {
8027 /* Take the write lock. */
8028 rc2 = vdThreadStartWrite(pDisk);
8029 AssertRC(rc2);
8030 fLockWrite = true;
8031
8032 pDisk->pImageRelay = NULL;
8033
8034 rc2 = vdThreadFinishWrite(pDisk);
8035 AssertRC(rc2);
8036 fLockWrite = false;
8037 }
8038 }
8039
8040 /*
8041 * Leave in case of an error to avoid corrupted data in the image chain
8042 * (includes cancelling the operation by the user).
8043 */
8044 if (RT_FAILURE(rc))
8045 break;
8046
8047 /* Need to hold the write lock while finishing the merge. */
8048 rc2 = vdThreadStartWrite(pDisk);
8049 AssertRC(rc2);
8050 fLockWrite = true;
8051
8052 /* Update parent UUID so that image chain is consistent.
8053 * The two attempts work around the problem that some backends
8054 * (e.g. iSCSI) do not support UUIDs, so we exploit the fact that
8055 * so far there can only be one such image in the chain. */
8056 /** @todo needs a better long-term solution, passing the UUID
8057 * knowledge from the caller or some such */
8058 RTUUID Uuid;
8059 PVDIMAGE pImageChild = NULL;
8060 if (nImageFrom < nImageTo)
8061 {
8062 if (pImageFrom->pPrev)
8063 {
8064 /* plan A: ask the parent itself for its UUID */
8065 rc = pImageFrom->pPrev->Backend->pfnGetUuid(pImageFrom->pPrev->pBackendData,
8066 &Uuid);
8067 if (RT_FAILURE(rc))
8068 {
8069 /* plan B: ask the child of the parent for parent UUID */
8070 rc = pImageFrom->Backend->pfnGetParentUuid(pImageFrom->pBackendData,
8071 &Uuid);
8072 }
8073 AssertRC(rc);
8074 }
8075 else
8076 RTUuidClear(&Uuid);
8077 rc = pImageTo->Backend->pfnSetParentUuid(pImageTo->pBackendData,
8078 &Uuid);
8079 AssertRC(rc);
8080 }
8081 else
8082 {
8083 /* Update the parent uuid of the child of the last merged image. */
8084 if (pImageFrom->pNext)
8085 {
8086 /* plan A: ask the parent itself for its UUID */
8087 rc = pImageTo->Backend->pfnGetUuid(pImageTo->pBackendData,
8088 &Uuid);
8089 if (RT_FAILURE(rc))
8090 {
8091 /* plan B: ask the child of the parent for parent UUID */
8092 rc = pImageTo->pNext->Backend->pfnGetParentUuid(pImageTo->pNext->pBackendData,
8093 &Uuid);
8094 }
8095 AssertRC(rc);
8096
8097 rc = pImageFrom->Backend->pfnSetParentUuid(pImageFrom->pNext->pBackendData,
8098 &Uuid);
8099 AssertRC(rc);
8100
8101 pImageChild = pImageFrom->pNext;
8102 }
8103 }
8104
8105 /* Delete the no longer needed images. */
8106 PVDIMAGE pImg = pImageFrom, pTmp;
8107 while (pImg != pImageTo)
8108 {
8109 if (nImageFrom < nImageTo)
8110 pTmp = pImg->pNext;
8111 else
8112 pTmp = pImg->pPrev;
8113 vdRemoveImageFromList(pDisk, pImg);
8114 pImg->Backend->pfnClose(pImg->pBackendData, true);
8115 RTMemFree(pImg->pszFilename);
8116 RTMemFree(pImg);
8117 pImg = pTmp;
8118 }
8119
8120 /* Make sure destination image is back to read only if necessary. */
8121 if (pImageTo != pDisk->pLast)
8122 {
8123 uOpenFlags = pImageTo->Backend->pfnGetOpenFlags(pImageTo->pBackendData);
8124 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8125 rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
8126 uOpenFlags);
8127 if (RT_FAILURE(rc))
8128 break;
8129 }
8130
8131 /*
8132 * Make sure the child is readonly
8133 * for the child -> parent merge direction
8134 * if necessary.
8135 */
8136 if ( nImageFrom > nImageTo
8137 && pImageChild
8138 && pImageChild != pDisk->pLast)
8139 {
8140 uOpenFlags = pImageChild->Backend->pfnGetOpenFlags(pImageChild->pBackendData);
8141 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8142 rc = pImageChild->Backend->pfnSetOpenFlags(pImageChild->pBackendData,
8143 uOpenFlags);
8144 if (RT_FAILURE(rc))
8145 break;
8146 }
8147 } while (0);
8148
8149 if (RT_UNLIKELY(fLockWrite))
8150 {
8151 rc2 = vdThreadFinishWrite(pDisk);
8152 AssertRC(rc2);
8153 }
8154 else if (RT_UNLIKELY(fLockRead))
8155 {
8156 rc2 = vdThreadFinishRead(pDisk);
8157 AssertRC(rc2);
8158 }
8159
8160 if (pvBuf)
8161 RTMemTmpFree(pvBuf);
8162
8163 if (RT_SUCCESS(rc) && pIfProgress && pIfProgress->pfnProgress)
8164 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8165
8166 LogFlowFunc(("returns %Rrc\n", rc));
8167 return rc;
8168}
8169
8170/**
8171 * Copies an image from one HDD container to another - extended version.
8172 * The copy is opened in the target HDD container.
8173 * It is possible to convert between different image formats, because the
8174 * backend for the destination may be different from the source.
8175 * If both the source and destination reference the same HDD container,
8176 * then the image is moved (by copying/deleting or renaming) to the new location.
8177 * The source container is unchanged if the move operation fails, otherwise
8178 * the image at the new location is opened in the same way as the old one was.
8179 *
8180 * @note The read/write accesses across disks are not synchronized, just the
8181 * accesses to each disk. Once there is a use case which requires a defined
8182 * read/write behavior in this situation this needs to be extended.
8183 *
8184 * @returns VBox status code.
8185 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8186 * @param pDiskFrom Pointer to source HDD container.
8187 * @param nImage Image number, counts from 0. 0 is always base image of container.
8188 * @param pDiskTo Pointer to destination HDD container.
8189 * @param pszBackend Name of the image file backend to use (may be NULL to use the same as the source, case insensitive).
8190 * @param pszFilename New name of the image (may be NULL to specify that the
8191 * copy destination is the destination container, or
8192 * if pDiskFrom == pDiskTo, i.e. when moving).
8193 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8194 * @param cbSize New image size (0 means leave unchanged).
 * @param nImageFromSame Number of the image in the source chain which is known to have the
 *                       same content as an image in the destination chain, or
 *                       VD_IMAGE_CONTENT_UNKNOWN if no such image is known. Used to limit
 *                       how far back the source chain must be read during the copy.
 * @param nImageToSame   Number of the corresponding image in the destination chain, or
 *                       VD_IMAGE_CONTENT_UNKNOWN. Must be unknown exactly when
 *                       nImageFromSame is unknown.
8197 * @param uImageFlags Flags specifying special destination image features.
8198 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8199 * This parameter is used if and only if a true copy is created.
8200 * In all rename/move cases or copy to existing image cases the modification UUIDs are copied over.
8201 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8202 * Only used if the destination image is created.
8203 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8204 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8205 * destination image.
8206 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
8207 * for the destination operation.
8208 */
VBOXDDU_DECL(int) VDCopyEx(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
                           const char *pszBackend, const char *pszFilename,
                           bool fMoveByRename, uint64_t cbSize,
                           unsigned nImageFromSame, unsigned nImageToSame,
                           unsigned uImageFlags, PCRTUUID pDstUuid,
                           unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
                           PVDINTERFACE pDstVDIfsImage,
                           PVDINTERFACE pDstVDIfsOperation)
{
    int rc = VINF_SUCCESS;
    int rc2;
    /* Lock bookkeeping: records which of the per-disk locks is currently held
     * so the common cleanup code at the end releases exactly what was taken. */
    bool fLockReadFrom = false, fLockWriteFrom = false, fLockWriteTo = false;
    /* Destination image; non-NULL once created/located. Also used by the
     * error path below to undo a partially completed copy. */
    PVDIMAGE pImageTo = NULL;

    LogFlowFunc(("pDiskFrom=%#p nImage=%u pDiskTo=%#p pszBackend=\"%s\" pszFilename=\"%s\" fMoveByRename=%d cbSize=%llu nImageFromSame=%u nImageToSame=%u uImageFlags=%#x pDstUuid=%#p uOpenFlags=%#x pVDIfsOperation=%#p pDstVDIfsImage=%#p pDstVDIfsOperation=%#p\n",
                 pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename, cbSize, nImageFromSame, nImageToSame, uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation, pDstVDIfsImage, pDstVDIfsOperation));

    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
    PVDINTERFACEPROGRESS pDstIfProgress = VDIfProgressGet(pDstVDIfsOperation);

    do {
        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pDiskFrom), ("pDiskFrom=%#p\n", pDiskFrom),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDiskFrom->u32Signature == VBOXHDDDISK_SIGNATURE,
                  ("u32Signature=%08x\n", pDiskFrom->u32Signature));

        rc2 = vdThreadStartRead(pDiskFrom);
        AssertRC(rc2);
        fLockReadFrom = true;
        PVDIMAGE pImageFrom = vdGetImageByNumber(pDiskFrom, nImage);
        AssertPtrBreakStmt(pImageFrom, rc = VERR_VD_IMAGE_NOT_FOUND);
        AssertMsgBreakStmt(VALID_PTR(pDiskTo), ("pDiskTo=%#p\n", pDiskTo),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDiskTo->u32Signature == VBOXHDDDISK_SIGNATURE,
                  ("u32Signature=%08x\n", pDiskTo->u32Signature));
        /* The "known same content" indices must be either both unknown or
         * both valid image numbers within their respective chains. */
        AssertMsgBreakStmt(   (nImageFromSame < nImage || nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
                           && (nImageToSame < pDiskTo->cImages || nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
                           && (   (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN && nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
                               || (nImageFromSame != VD_IMAGE_CONTENT_UNKNOWN && nImageToSame != VD_IMAGE_CONTENT_UNKNOWN)),
                           ("nImageFromSame=%u nImageToSame=%u\n", nImageFromSame, nImageToSame),
                           rc = VERR_INVALID_PARAMETER);

        /* Move the image. */
        if (pDiskFrom == pDiskTo)
        {
            /* Rename only works when backends are the same, are file based
             * and the rename method is implemented. */
            if (   fMoveByRename
                && !RTStrICmp(pszBackend, pImageFrom->Backend->pszBackendName)
                && pImageFrom->Backend->uBackendCaps & VD_CAP_FILE
                && pImageFrom->Backend->pfnRename)
            {
                /* Upgrade the read lock to a write lock for the rename. */
                rc2 = vdThreadFinishRead(pDiskFrom);
                AssertRC(rc2);
                fLockReadFrom = false;

                rc2 = vdThreadStartWrite(pDiskFrom);
                AssertRC(rc2);
                fLockWriteFrom = true;
                rc = pImageFrom->Backend->pfnRename(pImageFrom->pBackendData, pszFilename ? pszFilename : pImageFrom->pszFilename);
                break;
            }

            /** @todo Moving (including shrinking/growing) of the image is
             * requested, but the rename attempt failed or it wasn't possible.
             * Must now copy image to temp location. */
            AssertReleaseMsgFailed(("VDCopy: moving by copy/delete not implemented\n"));
        }

        /* pszFilename is allowed to be NULL, as this indicates copy to the existing image. */
        AssertMsgBreakStmt(pszFilename == NULL || (VALID_PTR(pszFilename) && *pszFilename),
                           ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                           rc = VERR_INVALID_PARAMETER);

        uint64_t cbSizeFrom;
        cbSizeFrom = pImageFrom->Backend->pfnGetSize(pImageFrom->pBackendData);
        if (cbSizeFrom == 0)
        {
            rc = VERR_VD_VALUE_NOT_FOUND;
            break;
        }

        /* Snapshot geometry and identity of the source while holding the
         * source read lock; they are applied to the destination later. */
        VDGEOMETRY PCHSGeometryFrom = {0, 0, 0};
        VDGEOMETRY LCHSGeometryFrom = {0, 0, 0};
        pImageFrom->Backend->pfnGetPCHSGeometry(pImageFrom->pBackendData, &PCHSGeometryFrom);
        pImageFrom->Backend->pfnGetLCHSGeometry(pImageFrom->pBackendData, &LCHSGeometryFrom);

        RTUUID ImageUuid, ImageModificationUuid;
        if (pDiskFrom != pDiskTo)
        {
            /* True copy: use the caller-supplied UUID or create a fresh one. */
            if (pDstUuid)
                ImageUuid = *pDstUuid;
            else
                RTUuidCreate(&ImageUuid);
        }
        else
        {
            /* Same container: keep the source UUID if it can be queried. */
            rc = pImageFrom->Backend->pfnGetUuid(pImageFrom->pBackendData, &ImageUuid);
            if (RT_FAILURE(rc))
                RTUuidCreate(&ImageUuid);
        }
        rc = pImageFrom->Backend->pfnGetModificationUuid(pImageFrom->pBackendData, &ImageModificationUuid);
        if (RT_FAILURE(rc))
            RTUuidClear(&ImageModificationUuid);

        char szComment[1024];
        rc = pImageFrom->Backend->pfnGetComment(pImageFrom->pBackendData, szComment, sizeof(szComment));
        if (RT_FAILURE(rc))
            szComment[0] = '\0';
        else
            szComment[sizeof(szComment) - 1] = '\0';

        rc2 = vdThreadFinishRead(pDiskFrom);
        AssertRC(rc2);
        fLockReadFrom = false;

        /* Only need the destination image count; take the lock briefly. */
        rc2 = vdThreadStartRead(pDiskTo);
        AssertRC(rc2);
        unsigned cImagesTo = pDiskTo->cImages;
        rc2 = vdThreadFinishRead(pDiskTo);
        AssertRC(rc2);

        if (pszFilename)
        {
            if (cbSize == 0)
                cbSize = cbSizeFrom;

            /* Create destination image with the properties of source image. */
            /** @todo replace the VDCreateDiff/VDCreateBase calls by direct
             * calls to the backend. Unifies the code and reduces the API
             * dependencies. Would also make the synchronization explicit. */
            if (cImagesTo > 0)
            {
                /* Destination chain already has images: create a diff on top. */
                rc = VDCreateDiff(pDiskTo, pszBackend, pszFilename,
                                  uImageFlags, szComment, &ImageUuid,
                                  NULL /* pParentUuid */,
                                  uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
                                  pDstVDIfsImage, NULL);

                rc2 = vdThreadStartWrite(pDiskTo);
                AssertRC(rc2);
                fLockWriteTo = true;
            } else {
                /** @todo hack to force creation of a fixed image for
                 * the RAW backend, which can't handle anything else. */
                if (!RTStrICmp(pszBackend, "RAW"))
                    uImageFlags |= VD_IMAGE_FLAGS_FIXED;

                vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
                vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);

                /* Empty destination chain: create a new base image. */
                rc = VDCreateBase(pDiskTo, pszBackend, pszFilename, cbSize,
                                  uImageFlags, szComment,
                                  &PCHSGeometryFrom, &LCHSGeometryFrom,
                                  NULL, uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
                                  pDstVDIfsImage, NULL);

                rc2 = vdThreadStartWrite(pDiskTo);
                AssertRC(rc2);
                fLockWriteTo = true;

                if (RT_SUCCESS(rc) && !RTUuidIsNull(&ImageUuid))
                    pDiskTo->pLast->Backend->pfnSetUuid(pDiskTo->pLast->pBackendData, &ImageUuid);
            }
            if (RT_FAILURE(rc))
                break;

            pImageTo = pDiskTo->pLast;
            AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);

            /* Never copy more than the source actually holds. */
            cbSize = RT_MIN(cbSize, cbSizeFrom);
        }
        else
        {
            /* Copy into the already opened last image of the destination. */
            pImageTo = pDiskTo->pLast;
            AssertPtrBreakStmt(pImageTo, rc = VERR_VD_IMAGE_NOT_FOUND);

            uint64_t cbSizeTo;
            cbSizeTo = pImageTo->Backend->pfnGetSize(pImageTo->pBackendData);
            if (cbSizeTo == 0)
            {
                rc = VERR_VD_VALUE_NOT_FOUND;
                break;
            }

            if (cbSize == 0)
                cbSize = RT_MIN(cbSizeFrom, cbSizeTo);

            vdFixupPCHSGeometry(&PCHSGeometryFrom, cbSize);
            vdFixupLCHSGeometry(&LCHSGeometryFrom, cbSize);

            /* Update the geometry in the destination image. */
            pImageTo->Backend->pfnSetPCHSGeometry(pImageTo->pBackendData, &PCHSGeometryFrom);
            pImageTo->Backend->pfnSetLCHSGeometry(pImageTo->pBackendData, &LCHSGeometryFrom);
        }

        rc2 = vdThreadFinishWrite(pDiskTo);
        AssertRC(rc2);
        fLockWriteTo = false;

        /* Whether we can take the optimized copy path (false) or not.
         * Don't optimize if the image existed or if it is a child image. */
        bool fSuppressRedundantIo = (   !(pszFilename == NULL || cImagesTo > 0)
                                     || (nImageToSame != VD_IMAGE_CONTENT_UNKNOWN));
        unsigned cImagesFromReadBack, cImagesToReadBack;

        /* How many images of each chain the copy helper must read back to
         * reconstruct data above the known-identical image (if any). */
        if (nImageFromSame == VD_IMAGE_CONTENT_UNKNOWN)
            cImagesFromReadBack = 0;
        else
        {
            if (nImage == VD_LAST_IMAGE)
                cImagesFromReadBack = pDiskFrom->cImages - nImageFromSame - 1;
            else
                cImagesFromReadBack = nImage - nImageFromSame;
        }

        if (nImageToSame == VD_IMAGE_CONTENT_UNKNOWN)
            cImagesToReadBack = 0;
        else
            cImagesToReadBack = pDiskTo->cImages - nImageToSame - 1;

        /* Copy the data. */
        rc = vdCopyHelper(pDiskFrom, pImageFrom, pDiskTo, cbSize,
                          cImagesFromReadBack, cImagesToReadBack,
                          fSuppressRedundantIo, pIfProgress, pDstIfProgress);

        if (RT_SUCCESS(rc))
        {
            rc2 = vdThreadStartWrite(pDiskTo);
            AssertRC(rc2);
            fLockWriteTo = true;

            /* Only set modification UUID if it is non-null, since the source
             * backend might not provide a valid modification UUID. */
            if (!RTUuidIsNull(&ImageModificationUuid))
                pImageTo->Backend->pfnSetModificationUuid(pImageTo->pBackendData, &ImageModificationUuid);

            /* Set the requested open flags if they differ from the value
             * required for creating the image and copying the contents. */
            if (   pImageTo && pszFilename
                && uOpenFlags != (uOpenFlags & ~VD_OPEN_FLAGS_READONLY))
                rc = pImageTo->Backend->pfnSetOpenFlags(pImageTo->pBackendData,
                                                        uOpenFlags);
        }
    } while (0);

    /* On failure, roll back a destination image this call created itself
     * (pszFilename != NULL means we created it above). */
    if (RT_FAILURE(rc) && pImageTo && pszFilename)
    {
        /* Take the write lock only if it is not taken. Not worth making the
         * above code even more complicated. */
        if (RT_UNLIKELY(!fLockWriteTo))
        {
            rc2 = vdThreadStartWrite(pDiskTo);
            AssertRC(rc2);
            fLockWriteTo = true;
        }
        /* Error detected, but new image created. Remove image from list. */
        vdRemoveImageFromList(pDiskTo, pImageTo);

        /* Close and delete image. */
        rc2 = pImageTo->Backend->pfnClose(pImageTo->pBackendData, true);
        AssertRC(rc2);
        pImageTo->pBackendData = NULL;

        /* Free remaining resources. */
        if (pImageTo->pszFilename)
            RTStrFree(pImageTo->pszFilename);

        RTMemFree(pImageTo);
    }

    /* Release whichever locks are still held (tracked by the flags above). */
    if (RT_UNLIKELY(fLockWriteTo))
    {
        rc2 = vdThreadFinishWrite(pDiskTo);
        AssertRC(rc2);
    }
    if (RT_UNLIKELY(fLockWriteFrom))
    {
        rc2 = vdThreadFinishWrite(pDiskFrom);
        AssertRC(rc2);
    }
    else if (RT_UNLIKELY(fLockReadFrom))
    {
        rc2 = vdThreadFinishRead(pDiskFrom);
        AssertRC(rc2);
    }

    if (RT_SUCCESS(rc))
    {
        if (pIfProgress && pIfProgress->pfnProgress)
            pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
        if (pDstIfProgress && pDstIfProgress->pfnProgress)
            pDstIfProgress->pfnProgress(pDstIfProgress->Core.pvUser, 100);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
8508
8509/**
8510 * Copies an image from one HDD container to another.
8511 * The copy is opened in the target HDD container.
8512 * It is possible to convert between different image formats, because the
8513 * backend for the destination may be different from the source.
8514 * If both the source and destination reference the same HDD container,
8515 * then the image is moved (by copying/deleting or renaming) to the new location.
8516 * The source container is unchanged if the move operation fails, otherwise
8517 * the image at the new location is opened in the same way as the old one was.
8518 *
8519 * @returns VBox status code.
8520 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8521 * @param pDiskFrom Pointer to source HDD container.
8522 * @param nImage Image number, counts from 0. 0 is always base image of container.
8523 * @param pDiskTo Pointer to destination HDD container.
8524 * @param pszBackend Name of the image file backend to use.
8525 * @param pszFilename New name of the image (may be NULL if pDiskFrom == pDiskTo).
8526 * @param fMoveByRename If true, attempt to perform a move by renaming (if successful the new size is ignored).
8527 * @param cbSize New image size (0 means leave unchanged).
8528 * @param uImageFlags Flags specifying special destination image features.
8529 * @param pDstUuid New UUID of the destination image. If NULL, a new UUID is created.
8530 * This parameter is used if and only if a true copy is created.
8531 * In all rename/move cases the UUIDs are copied over.
8532 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
8533 * Only used if the destination image is created.
8534 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8535 * @param pDstVDIfsImage Pointer to the per-image VD interface list, for the
8536 * destination image.
 * @param pDstVDIfsOperation Pointer to the per-operation VD interface list,
 *                           for the destination operation.
8539 */
8540VBOXDDU_DECL(int) VDCopy(PVBOXHDD pDiskFrom, unsigned nImage, PVBOXHDD pDiskTo,
8541 const char *pszBackend, const char *pszFilename,
8542 bool fMoveByRename, uint64_t cbSize,
8543 unsigned uImageFlags, PCRTUUID pDstUuid,
8544 unsigned uOpenFlags, PVDINTERFACE pVDIfsOperation,
8545 PVDINTERFACE pDstVDIfsImage,
8546 PVDINTERFACE pDstVDIfsOperation)
8547{
8548 return VDCopyEx(pDiskFrom, nImage, pDiskTo, pszBackend, pszFilename, fMoveByRename,
8549 cbSize, VD_IMAGE_CONTENT_UNKNOWN, VD_IMAGE_CONTENT_UNKNOWN,
8550 uImageFlags, pDstUuid, uOpenFlags, pVDIfsOperation,
8551 pDstVDIfsImage, pDstVDIfsOperation);
8552}
8553
8554/**
8555 * Optimizes the storage consumption of an image. Typically the unused blocks
8556 * have to be wiped with zeroes to achieve a substantial reduced storage use.
8557 * Another optimization done is reordering the image blocks, which can provide
8558 * a significant performance boost, as reads and writes tend to use less random
8559 * file offsets.
8560 *
8561 * @return VBox status code.
8562 * @return VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
8563 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
8564 * @return VERR_NOT_SUPPORTED if this kind of image can be compacted, but
8565 * the code for this isn't implemented yet.
8566 * @param pDisk Pointer to HDD container.
8567 * @param nImage Image number, counts from 0. 0 is always base image of container.
8568 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8569 */
8570VBOXDDU_DECL(int) VDCompact(PVBOXHDD pDisk, unsigned nImage,
8571 PVDINTERFACE pVDIfsOperation)
8572{
8573 int rc = VINF_SUCCESS;
8574 int rc2;
8575 bool fLockRead = false, fLockWrite = false;
8576 void *pvBuf = NULL;
8577 void *pvTmp = NULL;
8578
8579 LogFlowFunc(("pDisk=%#p nImage=%u pVDIfsOperation=%#p\n",
8580 pDisk, nImage, pVDIfsOperation));
8581
8582 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8583
8584 do {
8585 /* Check arguments. */
8586 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8587 rc = VERR_INVALID_PARAMETER);
8588 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8589 ("u32Signature=%08x\n", pDisk->u32Signature));
8590
8591 rc2 = vdThreadStartRead(pDisk);
8592 AssertRC(rc2);
8593 fLockRead = true;
8594
8595 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
8596 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
8597
8598 /* If there is no compact callback for not file based backends then
8599 * the backend doesn't need compaction. No need to make much fuss about
8600 * this. For file based ones signal this as not yet supported. */
8601 if (!pImage->Backend->pfnCompact)
8602 {
8603 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8604 rc = VERR_NOT_SUPPORTED;
8605 else
8606 rc = VINF_SUCCESS;
8607 break;
8608 }
8609
8610 /* Insert interface for reading parent state into per-operation list,
8611 * if there is a parent image. */
8612 VDINTERFACEPARENTSTATE VDIfParent;
8613 VDPARENTSTATEDESC ParentUser;
8614 if (pImage->pPrev)
8615 {
8616 VDIfParent.pfnParentRead = vdParentRead;
8617 ParentUser.pDisk = pDisk;
8618 ParentUser.pImage = pImage->pPrev;
8619 rc = VDInterfaceAdd(&VDIfParent.Core, "VDCompact_ParentState", VDINTERFACETYPE_PARENTSTATE,
8620 &ParentUser, sizeof(VDINTERFACEPARENTSTATE), &pVDIfsOperation);
8621 AssertRC(rc);
8622 }
8623
8624 rc2 = vdThreadFinishRead(pDisk);
8625 AssertRC(rc2);
8626 fLockRead = false;
8627
8628 rc2 = vdThreadStartWrite(pDisk);
8629 AssertRC(rc2);
8630 fLockWrite = true;
8631
8632 rc = pImage->Backend->pfnCompact(pImage->pBackendData,
8633 0, 99,
8634 pDisk->pVDIfsDisk,
8635 pImage->pVDIfsImage,
8636 pVDIfsOperation);
8637 } while (0);
8638
8639 if (RT_UNLIKELY(fLockWrite))
8640 {
8641 rc2 = vdThreadFinishWrite(pDisk);
8642 AssertRC(rc2);
8643 }
8644 else if (RT_UNLIKELY(fLockRead))
8645 {
8646 rc2 = vdThreadFinishRead(pDisk);
8647 AssertRC(rc2);
8648 }
8649
8650 if (pvBuf)
8651 RTMemTmpFree(pvBuf);
8652 if (pvTmp)
8653 RTMemTmpFree(pvTmp);
8654
8655 if (RT_SUCCESS(rc))
8656 {
8657 if (pIfProgress && pIfProgress->pfnProgress)
8658 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8659 }
8660
8661 LogFlowFunc(("returns %Rrc\n", rc));
8662 return rc;
8663}
8664
8665/**
8666 * Resizes the given disk image to the given size.
8667 *
 * @return VBox status code.
 * @return VERR_VD_IMAGE_READ_ONLY if image is not writable.
 * @return VERR_NOT_SUPPORTED if this kind of image can be resized, but
 *         the code for this isn't implemented yet.
 *
8672 * @param pDisk Pointer to the HDD container.
8673 * @param cbSize New size of the image.
8674 * @param pPCHSGeometry Pointer to the new physical disk geometry <= (16383,16,63). Not NULL.
8675 * @param pLCHSGeometry Pointer to the new logical disk geometry <= (x,255,63). Not NULL.
8676 * @param pVDIfsOperation Pointer to the per-operation VD interface list.
8677 */
8678VBOXDDU_DECL(int) VDResize(PVBOXHDD pDisk, uint64_t cbSize,
8679 PCVDGEOMETRY pPCHSGeometry,
8680 PCVDGEOMETRY pLCHSGeometry,
8681 PVDINTERFACE pVDIfsOperation)
8682{
8683 /** @todo r=klaus resizing was designed to be part of VDCopy, so having a separate function is not desirable. */
8684 int rc = VINF_SUCCESS;
8685 int rc2;
8686 bool fLockRead = false, fLockWrite = false;
8687
8688 LogFlowFunc(("pDisk=%#p cbSize=%llu pVDIfsOperation=%#p\n",
8689 pDisk, cbSize, pVDIfsOperation));
8690
8691 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8692
8693 do {
8694 /* Check arguments. */
8695 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8696 rc = VERR_INVALID_PARAMETER);
8697 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8698 ("u32Signature=%08x\n", pDisk->u32Signature));
8699
8700 rc2 = vdThreadStartRead(pDisk);
8701 AssertRC(rc2);
8702 fLockRead = true;
8703
8704 /* Must have at least one image in the chain, will resize last. */
8705 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8706 rc = VERR_NOT_SUPPORTED);
8707
8708 PVDIMAGE pImage = pDisk->pLast;
8709
8710 /* If there is no compact callback for not file based backends then
8711 * the backend doesn't need compaction. No need to make much fuss about
8712 * this. For file based ones signal this as not yet supported. */
8713 if (!pImage->Backend->pfnResize)
8714 {
8715 if (pImage->Backend->uBackendCaps & VD_CAP_FILE)
8716 rc = VERR_NOT_SUPPORTED;
8717 else
8718 rc = VINF_SUCCESS;
8719 break;
8720 }
8721
8722 rc2 = vdThreadFinishRead(pDisk);
8723 AssertRC(rc2);
8724 fLockRead = false;
8725
8726 rc2 = vdThreadStartWrite(pDisk);
8727 AssertRC(rc2);
8728 fLockWrite = true;
8729
8730 VDGEOMETRY PCHSGeometryOld;
8731 VDGEOMETRY LCHSGeometryOld;
8732 PCVDGEOMETRY pPCHSGeometryNew;
8733 PCVDGEOMETRY pLCHSGeometryNew;
8734
8735 if (pPCHSGeometry->cCylinders == 0)
8736 {
8737 /* Auto-detect marker, calculate new value ourself. */
8738 rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData, &PCHSGeometryOld);
8739 if (RT_SUCCESS(rc) && (PCHSGeometryOld.cCylinders != 0))
8740 PCHSGeometryOld.cCylinders = RT_MIN(cbSize / 512 / PCHSGeometryOld.cHeads / PCHSGeometryOld.cSectors, 16383);
8741 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8742 rc = VINF_SUCCESS;
8743
8744 pPCHSGeometryNew = &PCHSGeometryOld;
8745 }
8746 else
8747 pPCHSGeometryNew = pPCHSGeometry;
8748
8749 if (pLCHSGeometry->cCylinders == 0)
8750 {
8751 /* Auto-detect marker, calculate new value ourself. */
8752 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData, &LCHSGeometryOld);
8753 if (RT_SUCCESS(rc) && (LCHSGeometryOld.cCylinders != 0))
8754 LCHSGeometryOld.cCylinders = cbSize / 512 / LCHSGeometryOld.cHeads / LCHSGeometryOld.cSectors;
8755 else if (rc == VERR_VD_GEOMETRY_NOT_SET)
8756 rc = VINF_SUCCESS;
8757
8758 pLCHSGeometryNew = &LCHSGeometryOld;
8759 }
8760 else
8761 pLCHSGeometryNew = pLCHSGeometry;
8762
8763 if (RT_SUCCESS(rc))
8764 rc = pImage->Backend->pfnResize(pImage->pBackendData,
8765 cbSize,
8766 pPCHSGeometryNew,
8767 pLCHSGeometryNew,
8768 0, 99,
8769 pDisk->pVDIfsDisk,
8770 pImage->pVDIfsImage,
8771 pVDIfsOperation);
8772 } while (0);
8773
8774 if (RT_UNLIKELY(fLockWrite))
8775 {
8776 rc2 = vdThreadFinishWrite(pDisk);
8777 AssertRC(rc2);
8778 }
8779 else if (RT_UNLIKELY(fLockRead))
8780 {
8781 rc2 = vdThreadFinishRead(pDisk);
8782 AssertRC(rc2);
8783 }
8784
8785 if (RT_SUCCESS(rc))
8786 {
8787 if (pIfProgress && pIfProgress->pfnProgress)
8788 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8789
8790 pDisk->cbSize = cbSize;
8791 }
8792
8793 LogFlowFunc(("returns %Rrc\n", rc));
8794 return rc;
8795}
8796
8797VBOXDDU_DECL(int) VDPrepareWithFilters(PVBOXHDD pDisk, PVDINTERFACE pVDIfsOperation)
8798{
8799 int rc = VINF_SUCCESS;
8800 int rc2;
8801 bool fLockRead = false, fLockWrite = false;
8802
8803 LogFlowFunc(("pDisk=%#p pVDIfsOperation=%#p\n", pDisk, pVDIfsOperation));
8804
8805 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
8806
8807 do {
8808 /* Check arguments. */
8809 AssertMsgBreakStmt(VALID_PTR(pDisk), ("pDisk=%#p\n", pDisk),
8810 rc = VERR_INVALID_PARAMETER);
8811 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE,
8812 ("u32Signature=%08x\n", pDisk->u32Signature));
8813
8814 rc2 = vdThreadStartRead(pDisk);
8815 AssertRC(rc2);
8816 fLockRead = true;
8817
8818 /* Must have at least one image in the chain. */
8819 AssertMsgBreakStmt(pDisk->cImages >= 1, ("cImages=%u\n", pDisk->cImages),
8820 rc = VERR_VD_NOT_OPENED);
8821
8822 unsigned uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
8823 AssertMsgBreakStmt(!(uOpenFlags & VD_OPEN_FLAGS_READONLY),
8824 ("Last image should be read write"),
8825 rc = VERR_VD_IMAGE_READ_ONLY);
8826
8827 rc2 = vdThreadFinishRead(pDisk);
8828 AssertRC(rc2);
8829 fLockRead = false;
8830
8831 rc2 = vdThreadStartWrite(pDisk);
8832 AssertRC(rc2);
8833 fLockWrite = true;
8834
8835 /*
8836 * Open all images in the chain in read write mode first to avoid running
8837 * into an error in the middle of the process.
8838 */
8839 PVDIMAGE pImage = pDisk->pBase;
8840
8841 while (pImage)
8842 {
8843 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8844 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
8845 {
8846 /*
8847 * Clear skip consistency checks because the image is made writable now and
8848 * skipping consistency checks is only possible for readonly images.
8849 */
8850 uOpenFlags &= ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS);
8851 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8852 if (RT_FAILURE(rc))
8853 break;
8854 }
8855 pImage = pImage->pNext;
8856 }
8857
8858 if (RT_SUCCESS(rc))
8859 {
8860 unsigned cImgCur = 0;
8861 unsigned uPercentStart = 0;
8862 unsigned uPercentSpan = 100 / pDisk->cImages - 1;
8863
8864 /* Allocate tmp buffer. */
8865 void *pvBuf = RTMemTmpAlloc(VD_MERGE_BUFFER_SIZE);
8866 if (!pvBuf)
8867 {
8868 rc = VERR_NO_MEMORY;
8869 break;
8870 }
8871
8872 pImage = pDisk->pBase;
8873 pDisk->fLocked = true;
8874
8875 while ( pImage
8876 && RT_SUCCESS(rc))
8877 {
8878 /* Get size of image. */
8879 uint64_t cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
8880 uint64_t cbSizeFile = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
8881 uint64_t cbFileWritten = 0;
8882 uint64_t uOffset = 0;
8883 uint64_t cbRemaining = cbSize;
8884
8885 do
8886 {
8887 size_t cbThisRead = RT_MIN(VD_MERGE_BUFFER_SIZE, cbRemaining);
8888 RTSGSEG SegmentBuf;
8889 RTSGBUF SgBuf;
8890 VDIOCTX IoCtx;
8891
8892 SegmentBuf.pvSeg = pvBuf;
8893 SegmentBuf.cbSeg = VD_MERGE_BUFFER_SIZE;
8894 RTSgBufInit(&SgBuf, &SegmentBuf, 1);
8895 vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_READ, 0, 0, NULL,
8896 &SgBuf, NULL, NULL, VDIOCTX_FLAGS_SYNC);
8897
8898 rc = pImage->Backend->pfnRead(pImage->pBackendData, uOffset,
8899 cbThisRead, &IoCtx, &cbThisRead);
8900 if (rc != VERR_VD_BLOCK_FREE)
8901 {
8902 if (RT_FAILURE(rc))
8903 break;
8904
8905 /* Apply filter chains. */
8906 rc = vdFilterChainApplyRead(pDisk, uOffset, cbThisRead, &IoCtx);
8907 if (RT_FAILURE(rc))
8908 break;
8909
8910 rc = vdFilterChainApplyWrite(pDisk, uOffset, cbThisRead, &IoCtx);
8911 if (RT_FAILURE(rc))
8912 break;
8913
8914 RTSgBufReset(&SgBuf);
8915 size_t cbThisWrite = 0;
8916 size_t cbPreRead = 0;
8917 size_t cbPostRead = 0;
8918 rc = pImage->Backend->pfnWrite(pImage->pBackendData, uOffset,
8919 cbThisRead, &IoCtx, &cbThisWrite,
8920 &cbPreRead, &cbPostRead, 0);
8921 if (RT_FAILURE(rc))
8922 break;
8923 Assert(cbThisWrite == cbThisRead);
8924 cbFileWritten += cbThisWrite;
8925 }
8926 else
8927 rc = VINF_SUCCESS;
8928
8929 uOffset += cbThisRead;
8930 cbRemaining -= cbThisRead;
8931
8932 if (pIfProgress && pIfProgress->pfnProgress)
8933 {
8934 rc2 = pIfProgress->pfnProgress(pIfProgress->Core.pvUser,
8935 uPercentStart + cbFileWritten * uPercentSpan / cbSizeFile);
8936 AssertRC(rc2); /* Cancelling this operation without leaving an inconsistent state is not possible. */
8937 }
8938 } while (uOffset < cbSize);
8939
8940 pImage = pImage->pNext;
8941 cImgCur++;
8942 uPercentStart += uPercentSpan;
8943 }
8944
8945 pDisk->fLocked = false;
8946 if (pvBuf)
8947 RTMemTmpFree(pvBuf);
8948 }
8949
8950 /* Change images except last one back to readonly. */
8951 pImage = pDisk->pBase;
8952 while ( pImage != pDisk->pLast
8953 && pImage)
8954 {
8955 uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
8956 uOpenFlags |= VD_OPEN_FLAGS_READONLY;
8957 rc2 = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
8958 if (RT_FAILURE(rc2))
8959 {
8960 if (RT_SUCCESS(rc))
8961 rc = rc2;
8962 break;
8963 }
8964 pImage = pImage->pNext;
8965 }
8966 } while (0);
8967
8968 if (RT_UNLIKELY(fLockWrite))
8969 {
8970 rc2 = vdThreadFinishWrite(pDisk);
8971 AssertRC(rc2);
8972 }
8973 else if (RT_UNLIKELY(fLockRead))
8974 {
8975 rc2 = vdThreadFinishRead(pDisk);
8976 AssertRC(rc2);
8977 }
8978
8979 if ( RT_SUCCESS(rc)
8980 && pIfProgress
8981 && pIfProgress->pfnProgress)
8982 pIfProgress->pfnProgress(pIfProgress->Core.pvUser, 100);
8983
8984 LogFlowFunc(("returns %Rrc\n", rc));
8985 return rc;
8986}
8987
8988/**
8989 * Closes the last opened image file in HDD container.
8990 * If previous image file was opened in read-only mode (the normal case) and
8991 * the last opened image is in read-write mode then the previous image will be
8992 * reopened in read/write mode.
8993 *
8994 * @returns VBox status code.
8995 * @returns VERR_VD_NOT_OPENED if no image is opened in HDD container.
8996 * @param pDisk Pointer to HDD container.
8997 * @param fDelete If true, delete the image from the host disk.
8998 */
VBOXDDU_DECL(int) VDClose(PVBOXHDD pDisk, bool fDelete)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Not worth splitting this up into a read lock phase and write
         * lock phase, as closing an image is a relatively fast operation
         * dominated by the part which needs the write lock. */
        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = pDisk->pLast;
        if (!pImage)
        {
            rc = VERR_VD_NOT_OPENED;
            break;
        }

        /* Destroy the current discard state first which might still have pending blocks. */
        rc = vdDiscardStateDestroy(pDisk);
        if (RT_FAILURE(rc))
            break;

        /* Capture the open flags before pImage is freed below; they tell
         * whether the disk was writable through the image being closed. */
        unsigned uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
        /* Remove image from list of opened images. */
        vdRemoveImageFromList(pDisk, pImage);
        /* Close (and optionally delete) image. */
        rc = pImage->Backend->pfnClose(pImage->pBackendData, fDelete);
        /* Free remaining resources related to the image. */
        RTStrFree(pImage->pszFilename);
        RTMemFree(pImage);

        /* The previous image (if any) is the new topmost image now. If the
         * chain is empty there is nothing left to re-cache. */
        pImage = pDisk->pLast;
        if (!pImage)
            break;

        /* If disk was previously in read/write mode, make sure it will stay
         * like this (if possible) after closing this image. Set the open flags
         * accordingly. */
        if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            uOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
            uOpenFlags &= ~ VD_OPEN_FLAGS_READONLY;
            rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData, uOpenFlags);
        }

        /* Cache disk information. */
        pDisk->cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);

        /* Cache PCHS geometry. */
        rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                  &pDisk->PCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            /* No geometry available: reset the cache to "not set". */
            pDisk->PCHSGeometry.cCylinders = 0;
            pDisk->PCHSGeometry.cHeads = 0;
            pDisk->PCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the PCHS geometry is properly clipped. */
            pDisk->PCHSGeometry.cCylinders = RT_MIN(pDisk->PCHSGeometry.cCylinders, 16383);
            pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 16);
            pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
        }

        /* Cache LCHS geometry. */
        rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
                                                  &pDisk->LCHSGeometry);
        if (RT_FAILURE(rc2))
        {
            /* No geometry available: reset the cache to "not set". */
            pDisk->LCHSGeometry.cCylinders = 0;
            pDisk->LCHSGeometry.cHeads = 0;
            pDisk->LCHSGeometry.cSectors = 0;
        }
        else
        {
            /* Make sure the LCHS geometry is properly clipped. */
            pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
            pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
        }
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9100
9101/**
9102 * Closes the currently opened cache image file in HDD container.
9103 *
9104 * @return VBox status code.
9105 * @return VERR_VD_NOT_OPENED if no cache is opened in HDD container.
9106 * @param pDisk Pointer to HDD container.
 * @param   fDelete         If true, delete the cache image from the host disk.
9108 */
9109VBOXDDU_DECL(int) VDCacheClose(PVBOXHDD pDisk, bool fDelete)
9110{
9111 int rc = VINF_SUCCESS;
9112 int rc2;
9113 bool fLockWrite = false;
9114 PVDCACHE pCache = NULL;
9115
9116 LogFlowFunc(("pDisk=%#p fDelete=%d\n", pDisk, fDelete));
9117
9118 do
9119 {
9120 /* sanity check */
9121 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9122 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9123
9124 rc2 = vdThreadStartWrite(pDisk);
9125 AssertRC(rc2);
9126 fLockWrite = true;
9127
9128 AssertPtrBreakStmt(pDisk->pCache, rc = VERR_VD_CACHE_NOT_FOUND);
9129
9130 pCache = pDisk->pCache;
9131 pDisk->pCache = NULL;
9132
9133 pCache->Backend->pfnClose(pCache->pBackendData, fDelete);
9134 if (pCache->pszFilename)
9135 RTStrFree(pCache->pszFilename);
9136 RTMemFree(pCache);
9137 } while (0);
9138
9139 if (RT_LIKELY(fLockWrite))
9140 {
9141 rc2 = vdThreadFinishWrite(pDisk);
9142 AssertRC(rc2);
9143 }
9144
9145 LogFlowFunc(("returns %Rrc\n", rc));
9146 return rc;
9147}
9148
9149VBOXDDU_DECL(int) VDFilterRemove(PVBOXHDD pDisk, uint32_t fFlags)
9150{
9151 int rc = VINF_SUCCESS;
9152 int rc2;
9153 bool fLockWrite = false;
9154 PVDFILTER pFilter = NULL;
9155
9156 LogFlowFunc(("pDisk=%#p\n", pDisk));
9157
9158 do
9159 {
9160 /* sanity check */
9161 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9162 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9163
9164 AssertMsgBreakStmt(!(fFlags & ~VD_FILTER_FLAGS_MASK),
9165 ("Invalid flags set (fFlags=%#x)\n", fFlags),
9166 rc = VERR_INVALID_PARAMETER);
9167
9168 rc2 = vdThreadStartWrite(pDisk);
9169 AssertRC(rc2);
9170 fLockWrite = true;
9171
9172 if (fFlags & VD_FILTER_FLAGS_WRITE)
9173 {
9174 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainWrite), rc = VERR_VD_NOT_OPENED);
9175 pFilter = RTListGetLast(&pDisk->ListFilterChainWrite, VDFILTER, ListNodeChainWrite);
9176 AssertPtr(pFilter);
9177 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9178 vdFilterRelease(pFilter);
9179 }
9180
9181 if (fFlags & VD_FILTER_FLAGS_READ)
9182 {
9183 AssertBreakStmt(!RTListIsEmpty(&pDisk->ListFilterChainRead), rc = VERR_VD_NOT_OPENED);
9184 pFilter = RTListGetLast(&pDisk->ListFilterChainRead, VDFILTER, ListNodeChainRead);
9185 AssertPtr(pFilter);
9186 RTListNodeRemove(&pFilter->ListNodeChainRead);
9187 vdFilterRelease(pFilter);
9188 }
9189 } while (0);
9190
9191 if (RT_LIKELY(fLockWrite))
9192 {
9193 rc2 = vdThreadFinishWrite(pDisk);
9194 AssertRC(rc2);
9195 }
9196
9197 LogFlowFunc(("returns %Rrc\n", rc));
9198 return rc;
9199}
9200
9201/**
9202 * Closes all opened image files in HDD container.
9203 *
9204 * @returns VBox status code.
9205 * @param pDisk Pointer to HDD container.
9206 */
9207VBOXDDU_DECL(int) VDCloseAll(PVBOXHDD pDisk)
9208{
9209 int rc = VINF_SUCCESS;
9210 int rc2;
9211 bool fLockWrite = false;
9212
9213 LogFlowFunc(("pDisk=%#p\n", pDisk));
9214 do
9215 {
9216 /* sanity check */
9217 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9218 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9219
9220 /* Lock the entire operation. */
9221 rc2 = vdThreadStartWrite(pDisk);
9222 AssertRC(rc2);
9223 fLockWrite = true;
9224
9225 PVDCACHE pCache = pDisk->pCache;
9226 if (pCache)
9227 {
9228 rc2 = pCache->Backend->pfnClose(pCache->pBackendData, false);
9229 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9230 rc = rc2;
9231
9232 if (pCache->pszFilename)
9233 RTStrFree(pCache->pszFilename);
9234 RTMemFree(pCache);
9235 }
9236
9237 PVDIMAGE pImage = pDisk->pLast;
9238 while (VALID_PTR(pImage))
9239 {
9240 PVDIMAGE pPrev = pImage->pPrev;
9241 /* Remove image from list of opened images. */
9242 vdRemoveImageFromList(pDisk, pImage);
9243 /* Close image. */
9244 rc2 = pImage->Backend->pfnClose(pImage->pBackendData, false);
9245 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
9246 rc = rc2;
9247 /* Free remaining resources related to the image. */
9248 RTStrFree(pImage->pszFilename);
9249 RTMemFree(pImage);
9250 pImage = pPrev;
9251 }
9252 Assert(!VALID_PTR(pDisk->pLast));
9253 } while (0);
9254
9255 if (RT_UNLIKELY(fLockWrite))
9256 {
9257 rc2 = vdThreadFinishWrite(pDisk);
9258 AssertRC(rc2);
9259 }
9260
9261 LogFlowFunc(("returns %Rrc\n", rc));
9262 return rc;
9263}
9264
9265/**
9266 * Removes all filters of the given HDD container.
9267 *
9268 * @return VBox status code.
9269 * @param pDisk Pointer to HDD container.
9270 */
9271VBOXDDU_DECL(int) VDFilterRemoveAll(PVBOXHDD pDisk)
9272{
9273 int rc = VINF_SUCCESS;
9274 int rc2;
9275 bool fLockWrite = false;
9276
9277 LogFlowFunc(("pDisk=%#p\n", pDisk));
9278 do
9279 {
9280 /* sanity check */
9281 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9282 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9283
9284 /* Lock the entire operation. */
9285 rc2 = vdThreadStartWrite(pDisk);
9286 AssertRC(rc2);
9287 fLockWrite = true;
9288
9289 PVDFILTER pFilter, pFilterNext;
9290 RTListForEachSafe(&pDisk->ListFilterChainWrite, pFilter, pFilterNext, VDFILTER, ListNodeChainWrite)
9291 {
9292 RTListNodeRemove(&pFilter->ListNodeChainWrite);
9293 vdFilterRelease(pFilter);
9294 }
9295
9296 RTListForEachSafe(&pDisk->ListFilterChainRead, pFilter, pFilterNext, VDFILTER, ListNodeChainRead)
9297 {
9298 RTListNodeRemove(&pFilter->ListNodeChainRead);
9299 vdFilterRelease(pFilter);
9300 }
9301 Assert(RTListIsEmpty(&pDisk->ListFilterChainRead));
9302 Assert(RTListIsEmpty(&pDisk->ListFilterChainWrite));
9303 } while (0);
9304
9305 if (RT_UNLIKELY(fLockWrite))
9306 {
9307 rc2 = vdThreadFinishWrite(pDisk);
9308 AssertRC(rc2);
9309 }
9310
9311 LogFlowFunc(("returns %Rrc\n", rc));
9312 return rc;
9313}
9314
9315/**
9316 * Read data from virtual HDD.
9317 *
9318 * @returns VBox status code.
9319 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9320 * @param pDisk Pointer to HDD container.
9321 * @param uOffset Offset of first reading byte from start of disk.
9322 * @param pvBuf Pointer to buffer for reading data.
9323 * @param cbRead Number of bytes to read.
9324 */
VBOXDDU_DECL(int) VDRead(PVBOXHDD pDisk, uint64_t uOffset, void *pvBuf,
                         size_t cbRead)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockRead = false;

    LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbRead=%zu\n",
                 pDisk, uOffset, pvBuf, cbRead));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pvBuf),
                           ("pvBuf=%#p\n", pvBuf),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbRead,
                           ("cbRead=%zu\n", cbRead),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;

        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        /* Reads beyond the disk size fail with VERR_EOF, except for
         * floppy containers which get special treatment below. */
        if (uOffset + cbRead > pDisk->cbSize)
        {
            /* Floppy images might be smaller than the standard expected by
               the floppy controller code. So, we won't fail here. */
            AssertMsgBreakStmt(pDisk->enmType == VDTYPE_FLOPPY,
                               ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
                                uOffset, cbRead, pDisk->cbSize),
                               rc = VERR_EOF);
            /* Pre-fill the entire buffer so the part beyond the image size
             * reads back like a freshly formatted disk. */
            memset(pvBuf, 0xf6, cbRead); /* f6h = format.com filler byte */
            if (uOffset >= pDisk->cbSize)
                break; /* Whole request lies beyond the image: filler only. */
            cbRead = pDisk->cbSize - uOffset; /* Clip the real read to the image size. */
        }

        rc = vdReadHelper(pDisk, pImage, uOffset, pvBuf, cbRead,
                          true /* fUpdateCache */);
    } while (0);

    if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9382
9383/**
9384 * Write data to virtual HDD.
9385 *
9386 * @returns VBox status code.
9387 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9388 * @param pDisk Pointer to HDD container.
9389 * @param uOffset Offset of the first byte being
9390 * written from start of disk.
9391 * @param pvBuf Pointer to buffer for writing data.
9392 * @param cbWrite Number of bytes to write.
9393 */
VBOXDDU_DECL(int) VDWrite(PVBOXHDD pDisk, uint64_t uOffset, const void *pvBuf,
                          size_t cbWrite)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p uOffset=%llu pvBuf=%p cbWrite=%zu\n",
                 pDisk, uOffset, pvBuf, cbWrite));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pvBuf),
                           ("pvBuf=%#p\n", pvBuf),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(cbWrite,
                           ("cbWrite=%zu\n", cbWrite),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Unlike reads, writes beyond the disk size are always rejected. */
        AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
                           ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
                            uOffset, cbWrite, pDisk->cbSize),
                           rc = VERR_INVALID_PARAMETER);

        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        /* Mark the disk as modified before the data hits the image. */
        vdSetModifiedFlag(pDisk);
        rc = vdWriteHelper(pDisk, pImage, uOffset, pvBuf, cbWrite,
                           VDIOCTX_FLAGS_READ_UPDATE_CACHE);
        if (RT_FAILURE(rc))
            break;

        /* If there is a merge (in the direction towards a parent) running
         * concurrently then we have to also "relay" the write to this parent,
         * as the merge position might be already past the position where
         * this write is going. The "context" of the write can come from the
         * natural chain, since merging either already did or will take care
         * of the "other" content which is might be needed to fill the block
         * to a full allocation size. The cache doesn't need to be touched
         * as this write is covered by the previous one. */
        if (RT_UNLIKELY(pDisk->pImageRelay))
            rc = vdWriteHelper(pDisk, pDisk->pImageRelay, uOffset,
                               pvBuf, cbWrite, VDIOCTX_FLAGS_DEFAULT);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9457
9458/**
9459 * Make sure the on disk representation of a virtual HDD is up to date.
9460 *
9461 * @returns VBox status code.
9462 * @retval VERR_VD_NOT_OPENED if no image is opened in HDD container.
9463 * @param pDisk Pointer to HDD container.
9464 */
VBOXDDU_DECL(int) VDFlush(PVBOXHDD pDisk)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p\n", pDisk));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = pDisk->pLast;
        AssertPtrBreakStmt(pImage, rc = VERR_VD_NOT_OPENED);

        /* The flush goes through the async I/O context machinery in
         * synchronous mode; the event semaphore is signalled on completion. */
        VDIOCTX IoCtx;
        RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;

        rc = RTSemEventCreate(&hEventComplete);
        if (RT_FAILURE(rc))
            break;

        /* VDIOCTX_FLAGS_DONT_FREE: IoCtx lives on this stack frame and must
         * not be handed back to the allocator by the completion path. */
        vdIoCtxInit(&IoCtx, pDisk, VDIOCTXTXDIR_FLUSH, 0, 0, pImage, NULL,
                    NULL, vdFlushHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);

        IoCtx.Type.Root.pfnComplete = vdIoCtxSyncComplete;
        IoCtx.Type.Root.pvUser1 = pDisk;
        IoCtx.Type.Root.pvUser2 = hEventComplete;
        rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);

        /* Destroyed on both success and failure of the flush itself. */
        RTSemEventDestroy(hEventComplete);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9512
9513/**
9514 * Get number of opened images in HDD container.
9515 *
9516 * @returns Number of opened images for HDD container. 0 if no images have been opened.
9517 * @param pDisk Pointer to HDD container.
9518 */
9519VBOXDDU_DECL(unsigned) VDGetCount(PVBOXHDD pDisk)
9520{
9521 unsigned cImages;
9522 int rc2;
9523 bool fLockRead = false;
9524
9525 LogFlowFunc(("pDisk=%#p\n", pDisk));
9526 do
9527 {
9528 /* sanity check */
9529 AssertPtrBreakStmt(pDisk, cImages = 0);
9530 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9531
9532 rc2 = vdThreadStartRead(pDisk);
9533 AssertRC(rc2);
9534 fLockRead = true;
9535
9536 cImages = pDisk->cImages;
9537 } while (0);
9538
9539 if (RT_UNLIKELY(fLockRead))
9540 {
9541 rc2 = vdThreadFinishRead(pDisk);
9542 AssertRC(rc2);
9543 }
9544
9545 LogFlowFunc(("returns %u\n", cImages));
9546 return cImages;
9547}
9548
9549/**
9550 * Get read/write mode of HDD container.
9551 *
9552 * @returns Virtual disk ReadOnly status.
9553 * @returns true if no image is opened in HDD container.
9554 * @param pDisk Pointer to HDD container.
9555 */
9556VBOXDDU_DECL(bool) VDIsReadOnly(PVBOXHDD pDisk)
9557{
9558 bool fReadOnly;
9559 int rc2;
9560 bool fLockRead = false;
9561
9562 LogFlowFunc(("pDisk=%#p\n", pDisk));
9563 do
9564 {
9565 /* sanity check */
9566 AssertPtrBreakStmt(pDisk, fReadOnly = false);
9567 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9568
9569 rc2 = vdThreadStartRead(pDisk);
9570 AssertRC(rc2);
9571 fLockRead = true;
9572
9573 PVDIMAGE pImage = pDisk->pLast;
9574 AssertPtrBreakStmt(pImage, fReadOnly = true);
9575
9576 unsigned uOpenFlags;
9577 uOpenFlags = pDisk->pLast->Backend->pfnGetOpenFlags(pDisk->pLast->pBackendData);
9578 fReadOnly = !!(uOpenFlags & VD_OPEN_FLAGS_READONLY);
9579 } while (0);
9580
9581 if (RT_UNLIKELY(fLockRead))
9582 {
9583 rc2 = vdThreadFinishRead(pDisk);
9584 AssertRC(rc2);
9585 }
9586
9587 LogFlowFunc(("returns %d\n", fReadOnly));
9588 return fReadOnly;
9589}
9590
9591/**
9592 * Get sector size of an image in HDD container.
9593 *
9594 * @return Virtual disk sector size in bytes.
9595 * @return 0 if image with specified number was not opened.
9596 * @param pDisk Pointer to HDD container.
9597 * @param nImage Image number, counts from 0. 0 is always base image of container.
9598 */
9599VBOXDDU_DECL(uint32_t) VDGetSectorSize(PVBOXHDD pDisk, unsigned nImage)
9600{
9601 uint64_t cbSector;
9602 int rc2;
9603 bool fLockRead = false;
9604
9605 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9606 do
9607 {
9608 /* sanity check */
9609 AssertPtrBreakStmt(pDisk, cbSector = 0);
9610 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9611
9612 rc2 = vdThreadStartRead(pDisk);
9613 AssertRC(rc2);
9614 fLockRead = true;
9615
9616 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9617 AssertPtrBreakStmt(pImage, cbSector = 0);
9618 cbSector = pImage->Backend->pfnGetSectorSize(pImage->pBackendData);
9619 } while (0);
9620
9621 if (RT_UNLIKELY(fLockRead))
9622 {
9623 rc2 = vdThreadFinishRead(pDisk);
9624 AssertRC(rc2);
9625 }
9626
9627 LogFlowFunc(("returns %u\n", cbSector));
9628 return cbSector;
9629}
9630
9631/**
9632 * Get total capacity of an image in HDD container.
9633 *
9634 * @returns Virtual disk size in bytes.
 * @returns 0 if an image with the specified number was not opened.
9636 * @param pDisk Pointer to HDD container.
9637 * @param nImage Image number, counts from 0. 0 is always base image of container.
9638 */
9639VBOXDDU_DECL(uint64_t) VDGetSize(PVBOXHDD pDisk, unsigned nImage)
9640{
9641 uint64_t cbSize;
9642 int rc2;
9643 bool fLockRead = false;
9644
9645 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9646 do
9647 {
9648 /* sanity check */
9649 AssertPtrBreakStmt(pDisk, cbSize = 0);
9650 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9651
9652 rc2 = vdThreadStartRead(pDisk);
9653 AssertRC(rc2);
9654 fLockRead = true;
9655
9656 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9657 AssertPtrBreakStmt(pImage, cbSize = 0);
9658 cbSize = pImage->Backend->pfnGetSize(pImage->pBackendData);
9659 } while (0);
9660
9661 if (RT_UNLIKELY(fLockRead))
9662 {
9663 rc2 = vdThreadFinishRead(pDisk);
9664 AssertRC(rc2);
9665 }
9666
9667 LogFlowFunc(("returns %llu\n", cbSize));
9668 return cbSize;
9669}
9670
9671/**
9672 * Get total file size of an image in HDD container.
9673 *
9674 * @returns Virtual disk size in bytes.
 * @returns 0 if an image with the specified number was not opened.
9676 * @param pDisk Pointer to HDD container.
9677 * @param nImage Image number, counts from 0. 0 is always base image of container.
9678 */
9679VBOXDDU_DECL(uint64_t) VDGetFileSize(PVBOXHDD pDisk, unsigned nImage)
9680{
9681 uint64_t cbSize;
9682 int rc2;
9683 bool fLockRead = false;
9684
9685 LogFlowFunc(("pDisk=%#p nImage=%u\n", pDisk, nImage));
9686 do
9687 {
9688 /* sanity check */
9689 AssertPtrBreakStmt(pDisk, cbSize = 0);
9690 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9691
9692 rc2 = vdThreadStartRead(pDisk);
9693 AssertRC(rc2);
9694 fLockRead = true;
9695
9696 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9697 AssertPtrBreakStmt(pImage, cbSize = 0);
9698 cbSize = pImage->Backend->pfnGetFileSize(pImage->pBackendData);
9699 } while (0);
9700
9701 if (RT_UNLIKELY(fLockRead))
9702 {
9703 rc2 = vdThreadFinishRead(pDisk);
9704 AssertRC(rc2);
9705 }
9706
9707 LogFlowFunc(("returns %llu\n", cbSize));
9708 return cbSize;
9709}
9710
9711/**
9712 * Get virtual disk PCHS geometry stored in HDD container.
9713 *
9714 * @returns VBox status code.
9715 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9716 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9717 * @param pDisk Pointer to HDD container.
9718 * @param nImage Image number, counts from 0. 0 is always base image of container.
9719 * @param pPCHSGeometry Where to store PCHS geometry. Not NULL.
9720 */
VBOXDDU_DECL(int) VDGetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
                                    PVDGEOMETRY pPCHSGeometry)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockRead = false;

    LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p\n",
                 pDisk, nImage, pPCHSGeometry));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(VALID_PTR(pPCHSGeometry),
                           ("pPCHSGeometry=%#p\n", pPCHSGeometry),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        if (pImage == pDisk->pLast)
        {
            /* Use cached information if possible. */
            if (pDisk->PCHSGeometry.cCylinders != 0)
                *pPCHSGeometry = pDisk->PCHSGeometry;
            else
                rc = VERR_VD_GEOMETRY_NOT_SET;
        }
        else
            /* Not the topmost image: query the backend directly. */
            rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                     pPCHSGeometry);
    } while (0);

    if (RT_UNLIKELY(fLockRead))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    /* NOTE(review): this logs the cached disk geometry, which for non-topmost
     * images is not necessarily what was stored in *pPCHSGeometry. */
    LogFlowFunc(("%Rrc (PCHS=%u/%u/%u)\n", rc,
                 pDisk->PCHSGeometry.cCylinders, pDisk->PCHSGeometry.cHeads,
                 pDisk->PCHSGeometry.cSectors));
    return rc;
}
9772
9773/**
9774 * Store virtual disk PCHS geometry in HDD container.
9775 *
9776 * Note that in case of unrecoverable error all images in HDD container will be closed.
9777 *
9778 * @returns VBox status code.
9779 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9780 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9781 * @param pDisk Pointer to HDD container.
9782 * @param nImage Image number, counts from 0. 0 is always base image of container.
9783 * @param pPCHSGeometry Where to load PCHS geometry from. Not NULL.
9784 */
VBOXDDU_DECL(int) VDSetPCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
                                    PCVDGEOMETRY pPCHSGeometry)
{
    int rc = VINF_SUCCESS;
    int rc2;
    bool fLockWrite = false;

    /* NOTE(review): pPCHSGeometry is dereferenced for logging before it is
     * validated below; a NULL pointer would already crash here. */
    LogFlowFunc(("pDisk=%#p nImage=%u pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
                 pDisk, nImage, pPCHSGeometry, pPCHSGeometry->cCylinders,
                 pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. Heads/sectors are limited here; cylinders are not
         * validated on input. */
        AssertMsgBreakStmt(   VALID_PTR(pPCHSGeometry)
                           && pPCHSGeometry->cHeads <= 16
                           && pPCHSGeometry->cSectors <= 63,
                           ("pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pPCHSGeometry,
                            pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads,
                            pPCHSGeometry->cSectors),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
        AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);

        if (pImage == pDisk->pLast)
        {
            if (    pPCHSGeometry->cCylinders != pDisk->PCHSGeometry.cCylinders
                ||  pPCHSGeometry->cHeads != pDisk->PCHSGeometry.cHeads
                ||  pPCHSGeometry->cSectors != pDisk->PCHSGeometry.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
                                                         pPCHSGeometry);

                /* Cache new geometry values in any case. */
                rc2 = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                          &pDisk->PCHSGeometry);
                if (RT_FAILURE(rc2))
                {
                    /* Backend reports no geometry: reset the cache to "not set". */
                    pDisk->PCHSGeometry.cCylinders = 0;
                    pDisk->PCHSGeometry.cHeads = 0;
                    pDisk->PCHSGeometry.cSectors = 0;
                }
                else
                {
                    /* Make sure the CHS geometry is properly clipped. */
                    /* NOTE(review): heads are clipped to 255 here although the
                     * input check above requires <= 16 and VDClose clips the
                     * cached PCHS heads to 16 — confirm which limit is intended. */
                    pDisk->PCHSGeometry.cHeads = RT_MIN(pDisk->PCHSGeometry.cHeads, 255);
                    pDisk->PCHSGeometry.cSectors = RT_MIN(pDisk->PCHSGeometry.cSectors, 63);
                }
            }
        }
        else
        {
            /* Not the topmost image: compare against the backend's current
             * geometry instead of the cached disk geometry. */
            VDGEOMETRY PCHS;
            rc = pImage->Backend->pfnGetPCHSGeometry(pImage->pBackendData,
                                                     &PCHS);
            if (    RT_FAILURE(rc)
                ||  pPCHSGeometry->cCylinders != PCHS.cCylinders
                ||  pPCHSGeometry->cHeads != PCHS.cHeads
                ||  pPCHSGeometry->cSectors != PCHS.cSectors)
            {
                /* Only update geometry if it is changed. Avoids similar checks
                 * in every backend. Most of the time the new geometry is set
                 * to the previous values, so no need to go through the hassle
                 * of updating an image which could be opened in read-only mode
                 * right now. */
                rc = pImage->Backend->pfnSetPCHSGeometry(pImage->pBackendData,
                                                         pPCHSGeometry);
            }
        }
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
9878
9879/**
9880 * Get virtual disk LCHS geometry stored in HDD container.
9881 *
9882 * @returns VBox status code.
9883 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9884 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9885 * @param pDisk Pointer to HDD container.
9886 * @param nImage Image number, counts from 0. 0 is always base image of container.
9887 * @param pLCHSGeometry Where to store LCHS geometry. Not NULL.
9888 */
9889VBOXDDU_DECL(int) VDGetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9890 PVDGEOMETRY pLCHSGeometry)
9891{
9892 int rc = VINF_SUCCESS;
9893 int rc2;
9894 bool fLockRead = false;
9895
9896 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p\n",
9897 pDisk, nImage, pLCHSGeometry));
9898 do
9899 {
9900 /* sanity check */
9901 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9902 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9903
9904 /* Check arguments. */
9905 AssertMsgBreakStmt(VALID_PTR(pLCHSGeometry),
9906 ("pLCHSGeometry=%#p\n", pLCHSGeometry),
9907 rc = VERR_INVALID_PARAMETER);
9908
9909 rc2 = vdThreadStartRead(pDisk);
9910 AssertRC(rc2);
9911 fLockRead = true;
9912
9913 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9914 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9915
9916 if (pImage == pDisk->pLast)
9917 {
9918 /* Use cached information if possible. */
9919 if (pDisk->LCHSGeometry.cCylinders != 0)
9920 *pLCHSGeometry = pDisk->LCHSGeometry;
9921 else
9922 rc = VERR_VD_GEOMETRY_NOT_SET;
9923 }
9924 else
9925 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
9926 pLCHSGeometry);
9927 } while (0);
9928
9929 if (RT_UNLIKELY(fLockRead))
9930 {
9931 rc2 = vdThreadFinishRead(pDisk);
9932 AssertRC(rc2);
9933 }
9934
9935 LogFlowFunc((": %Rrc (LCHS=%u/%u/%u)\n", rc,
9936 pDisk->LCHSGeometry.cCylinders, pDisk->LCHSGeometry.cHeads,
9937 pDisk->LCHSGeometry.cSectors));
9938 return rc;
9939}
9940
9941/**
9942 * Store virtual disk LCHS geometry in HDD container.
9943 *
9944 * Note that in case of unrecoverable error all images in HDD container will be closed.
9945 *
9946 * @returns VBox status code.
9947 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
9948 * @returns VERR_VD_GEOMETRY_NOT_SET if no geometry present in the HDD container.
9949 * @param pDisk Pointer to HDD container.
9950 * @param nImage Image number, counts from 0. 0 is always base image of container.
9951 * @param pLCHSGeometry Where to load LCHS geometry from. Not NULL.
9952 */
9953VBOXDDU_DECL(int) VDSetLCHSGeometry(PVBOXHDD pDisk, unsigned nImage,
9954 PCVDGEOMETRY pLCHSGeometry)
9955{
9956 int rc = VINF_SUCCESS;
9957 int rc2;
9958 bool fLockWrite = false;
9959
9960 LogFlowFunc(("pDisk=%#p nImage=%u pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
9961 pDisk, nImage, pLCHSGeometry, pLCHSGeometry->cCylinders,
9962 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
9963 do
9964 {
9965 /* sanity check */
9966 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
9967 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
9968
9969 /* Check arguments. */
9970 AssertMsgBreakStmt( VALID_PTR(pLCHSGeometry)
9971 && pLCHSGeometry->cHeads <= 255
9972 && pLCHSGeometry->cSectors <= 63,
9973 ("pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pLCHSGeometry,
9974 pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads,
9975 pLCHSGeometry->cSectors),
9976 rc = VERR_INVALID_PARAMETER);
9977
9978 rc2 = vdThreadStartWrite(pDisk);
9979 AssertRC(rc2);
9980 fLockWrite = true;
9981
9982 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
9983 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
9984
9985 if (pImage == pDisk->pLast)
9986 {
9987 if ( pLCHSGeometry->cCylinders != pDisk->LCHSGeometry.cCylinders
9988 || pLCHSGeometry->cHeads != pDisk->LCHSGeometry.cHeads
9989 || pLCHSGeometry->cSectors != pDisk->LCHSGeometry.cSectors)
9990 {
9991 /* Only update geometry if it is changed. Avoids similar checks
9992 * in every backend. Most of the time the new geometry is set
9993 * to the previous values, so no need to go through the hassle
9994 * of updating an image which could be opened in read-only mode
9995 * right now. */
9996 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
9997 pLCHSGeometry);
9998
9999 /* Cache new geometry values in any case. */
10000 rc2 = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10001 &pDisk->LCHSGeometry);
10002 if (RT_FAILURE(rc2))
10003 {
10004 pDisk->LCHSGeometry.cCylinders = 0;
10005 pDisk->LCHSGeometry.cHeads = 0;
10006 pDisk->LCHSGeometry.cSectors = 0;
10007 }
10008 else
10009 {
10010 /* Make sure the CHS geometry is properly clipped. */
10011 pDisk->LCHSGeometry.cHeads = RT_MIN(pDisk->LCHSGeometry.cHeads, 255);
10012 pDisk->LCHSGeometry.cSectors = RT_MIN(pDisk->LCHSGeometry.cSectors, 63);
10013 }
10014 }
10015 }
10016 else
10017 {
10018 VDGEOMETRY LCHS;
10019 rc = pImage->Backend->pfnGetLCHSGeometry(pImage->pBackendData,
10020 &LCHS);
10021 if ( RT_FAILURE(rc)
10022 || pLCHSGeometry->cCylinders != LCHS.cCylinders
10023 || pLCHSGeometry->cHeads != LCHS.cHeads
10024 || pLCHSGeometry->cSectors != LCHS.cSectors)
10025 {
10026 /* Only update geometry if it is changed. Avoids similar checks
10027 * in every backend. Most of the time the new geometry is set
10028 * to the previous values, so no need to go through the hassle
10029 * of updating an image which could be opened in read-only mode
10030 * right now. */
10031 rc = pImage->Backend->pfnSetLCHSGeometry(pImage->pBackendData,
10032 pLCHSGeometry);
10033 }
10034 }
10035 } while (0);
10036
10037 if (RT_UNLIKELY(fLockWrite))
10038 {
10039 rc2 = vdThreadFinishWrite(pDisk);
10040 AssertRC(rc2);
10041 }
10042
10043 LogFlowFunc(("returns %Rrc\n", rc));
10044 return rc;
10045}
10046
10047/**
10048 * Get version of image in HDD container.
10049 *
10050 * @returns VBox status code.
10051 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10052 * @param pDisk Pointer to HDD container.
10053 * @param nImage Image number, counts from 0. 0 is always base image of container.
10054 * @param puVersion Where to store the image version.
10055 */
10056VBOXDDU_DECL(int) VDGetVersion(PVBOXHDD pDisk, unsigned nImage,
10057 unsigned *puVersion)
10058{
10059 int rc = VINF_SUCCESS;
10060 int rc2;
10061 bool fLockRead = false;
10062
10063 LogFlowFunc(("pDisk=%#p nImage=%u puVersion=%#p\n",
10064 pDisk, nImage, puVersion));
10065 do
10066 {
10067 /* sanity check */
10068 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10069 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10070
10071 /* Check arguments. */
10072 AssertMsgBreakStmt(VALID_PTR(puVersion),
10073 ("puVersion=%#p\n", puVersion),
10074 rc = VERR_INVALID_PARAMETER);
10075
10076 rc2 = vdThreadStartRead(pDisk);
10077 AssertRC(rc2);
10078 fLockRead = true;
10079
10080 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10081 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10082
10083 *puVersion = pImage->Backend->pfnGetVersion(pImage->pBackendData);
10084 } while (0);
10085
10086 if (RT_UNLIKELY(fLockRead))
10087 {
10088 rc2 = vdThreadFinishRead(pDisk);
10089 AssertRC(rc2);
10090 }
10091
10092 LogFlowFunc(("returns %Rrc uVersion=%#x\n", rc, *puVersion));
10093 return rc;
10094}
10095
10096/**
10097 * List the capabilities of image backend in HDD container.
10098 *
10099 * @returns VBox status code.
10100 * @retval VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10101 * @param pDisk Pointer to the HDD container.
10102 * @param nImage Image number, counts from 0. 0 is always base image of container.
10103 * @param pBackendInfo Where to store the backend information.
10104 */
10105VBOXDDU_DECL(int) VDBackendInfoSingle(PVBOXHDD pDisk, unsigned nImage,
10106 PVDBACKENDINFO pBackendInfo)
10107{
10108 int rc = VINF_SUCCESS;
10109 int rc2;
10110 bool fLockRead = false;
10111
10112 LogFlowFunc(("pDisk=%#p nImage=%u pBackendInfo=%#p\n",
10113 pDisk, nImage, pBackendInfo));
10114 do
10115 {
10116 /* sanity check */
10117 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10118 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10119
10120 /* Check arguments. */
10121 AssertMsgBreakStmt(VALID_PTR(pBackendInfo),
10122 ("pBackendInfo=%#p\n", pBackendInfo),
10123 rc = VERR_INVALID_PARAMETER);
10124
10125 rc2 = vdThreadStartRead(pDisk);
10126 AssertRC(rc2);
10127 fLockRead = true;
10128
10129 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10130 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10131
10132 pBackendInfo->pszBackend = pImage->Backend->pszBackendName;
10133 pBackendInfo->uBackendCaps = pImage->Backend->uBackendCaps;
10134 pBackendInfo->paFileExtensions = pImage->Backend->paFileExtensions;
10135 pBackendInfo->paConfigInfo = pImage->Backend->paConfigInfo;
10136 } while (0);
10137
10138 if (RT_UNLIKELY(fLockRead))
10139 {
10140 rc2 = vdThreadFinishRead(pDisk);
10141 AssertRC(rc2);
10142 }
10143
10144 LogFlowFunc(("returns %Rrc\n", rc));
10145 return rc;
10146}
10147
10148/**
10149 * Get flags of image in HDD container.
10150 *
10151 * @returns VBox status code.
10152 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10153 * @param pDisk Pointer to HDD container.
10154 * @param nImage Image number, counts from 0. 0 is always base image of container.
10155 * @param puImageFlags Where to store the image flags.
10156 */
10157VBOXDDU_DECL(int) VDGetImageFlags(PVBOXHDD pDisk, unsigned nImage,
10158 unsigned *puImageFlags)
10159{
10160 int rc = VINF_SUCCESS;
10161 int rc2;
10162 bool fLockRead = false;
10163
10164 LogFlowFunc(("pDisk=%#p nImage=%u puImageFlags=%#p\n",
10165 pDisk, nImage, puImageFlags));
10166 do
10167 {
10168 /* sanity check */
10169 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10170 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10171
10172 /* Check arguments. */
10173 AssertMsgBreakStmt(VALID_PTR(puImageFlags),
10174 ("puImageFlags=%#p\n", puImageFlags),
10175 rc = VERR_INVALID_PARAMETER);
10176
10177 rc2 = vdThreadStartRead(pDisk);
10178 AssertRC(rc2);
10179 fLockRead = true;
10180
10181 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10182 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10183
10184 *puImageFlags = pImage->uImageFlags;
10185 } while (0);
10186
10187 if (RT_UNLIKELY(fLockRead))
10188 {
10189 rc2 = vdThreadFinishRead(pDisk);
10190 AssertRC(rc2);
10191 }
10192
10193 LogFlowFunc(("returns %Rrc uImageFlags=%#x\n", rc, *puImageFlags));
10194 return rc;
10195}
10196
10197/**
10198 * Get open flags of image in HDD container.
10199 *
10200 * @returns VBox status code.
10201 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10202 * @param pDisk Pointer to HDD container.
10203 * @param nImage Image number, counts from 0. 0 is always base image of container.
10204 * @param puOpenFlags Where to store the image open flags.
10205 */
10206VBOXDDU_DECL(int) VDGetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10207 unsigned *puOpenFlags)
10208{
10209 int rc = VINF_SUCCESS;
10210 int rc2;
10211 bool fLockRead = false;
10212
10213 LogFlowFunc(("pDisk=%#p nImage=%u puOpenFlags=%#p\n",
10214 pDisk, nImage, puOpenFlags));
10215 do
10216 {
10217 /* sanity check */
10218 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10219 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10220
10221 /* Check arguments. */
10222 AssertMsgBreakStmt(VALID_PTR(puOpenFlags),
10223 ("puOpenFlags=%#p\n", puOpenFlags),
10224 rc = VERR_INVALID_PARAMETER);
10225
10226 rc2 = vdThreadStartRead(pDisk);
10227 AssertRC(rc2);
10228 fLockRead = true;
10229
10230 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10231 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10232
10233 *puOpenFlags = pImage->Backend->pfnGetOpenFlags(pImage->pBackendData);
10234 } while (0);
10235
10236 if (RT_UNLIKELY(fLockRead))
10237 {
10238 rc2 = vdThreadFinishRead(pDisk);
10239 AssertRC(rc2);
10240 }
10241
10242 LogFlowFunc(("returns %Rrc uOpenFlags=%#x\n", rc, *puOpenFlags));
10243 return rc;
10244}
10245
10246/**
10247 * Set open flags of image in HDD container.
10248 * This operation may cause file locking changes and/or files being reopened.
10249 * Note that in case of unrecoverable error all images in HDD container will be closed.
10250 *
10251 * @returns VBox status code.
10252 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10253 * @param pDisk Pointer to HDD container.
10254 * @param nImage Image number, counts from 0. 0 is always base image of container.
10255 * @param uOpenFlags Image file open mode, see VD_OPEN_FLAGS_* constants.
10256 */
10257VBOXDDU_DECL(int) VDSetOpenFlags(PVBOXHDD pDisk, unsigned nImage,
10258 unsigned uOpenFlags)
10259{
10260 int rc;
10261 int rc2;
10262 bool fLockWrite = false;
10263
10264 LogFlowFunc(("pDisk=%#p uOpenFlags=%#u\n", pDisk, uOpenFlags));
10265 do
10266 {
10267 /* sanity check */
10268 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10269 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10270
10271 /* Check arguments. */
10272 AssertMsgBreakStmt((uOpenFlags & ~VD_OPEN_FLAGS_MASK) == 0,
10273 ("uOpenFlags=%#x\n", uOpenFlags),
10274 rc = VERR_INVALID_PARAMETER);
10275
10276 rc2 = vdThreadStartWrite(pDisk);
10277 AssertRC(rc2);
10278 fLockWrite = true;
10279
10280 /* Destroy any discard state because the image might be changed to readonly mode. */
10281 rc = vdDiscardStateDestroy(pDisk);
10282 if (RT_FAILURE(rc))
10283 break;
10284
10285 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10286 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10287
10288 rc = pImage->Backend->pfnSetOpenFlags(pImage->pBackendData,
10289 uOpenFlags & ~(VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS));
10290 if (RT_SUCCESS(rc))
10291 pImage->uOpenFlags = uOpenFlags & (VD_OPEN_FLAGS_HONOR_SAME | VD_OPEN_FLAGS_DISCARD | VD_OPEN_FLAGS_IGNORE_FLUSH | VD_OPEN_FLAGS_INFORM_ABOUT_ZERO_BLOCKS);
10292 } while (0);
10293
10294 if (RT_UNLIKELY(fLockWrite))
10295 {
10296 rc2 = vdThreadFinishWrite(pDisk);
10297 AssertRC(rc2);
10298 }
10299
10300 LogFlowFunc(("returns %Rrc\n", rc));
10301 return rc;
10302}
10303
10304/**
10305 * Get base filename of image in HDD container. Some image formats use
10306 * other filenames as well, so don't use this for anything but informational
10307 * purposes.
10308 *
10309 * @returns VBox status code.
10310 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10311 * @returns VERR_BUFFER_OVERFLOW if pszFilename buffer too small to hold filename.
10312 * @param pDisk Pointer to HDD container.
10313 * @param nImage Image number, counts from 0. 0 is always base image of container.
10314 * @param pszFilename Where to store the image file name.
10315 * @param cbFilename Size of buffer pszFilename points to.
10316 */
10317VBOXDDU_DECL(int) VDGetFilename(PVBOXHDD pDisk, unsigned nImage,
10318 char *pszFilename, unsigned cbFilename)
10319{
10320 int rc;
10321 int rc2;
10322 bool fLockRead = false;
10323
10324 LogFlowFunc(("pDisk=%#p nImage=%u pszFilename=%#p cbFilename=%u\n",
10325 pDisk, nImage, pszFilename, cbFilename));
10326 do
10327 {
10328 /* sanity check */
10329 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10330 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10331
10332 /* Check arguments. */
10333 AssertMsgBreakStmt(VALID_PTR(pszFilename) && *pszFilename,
10334 ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
10335 rc = VERR_INVALID_PARAMETER);
10336 AssertMsgBreakStmt(cbFilename,
10337 ("cbFilename=%u\n", cbFilename),
10338 rc = VERR_INVALID_PARAMETER);
10339
10340 rc2 = vdThreadStartRead(pDisk);
10341 AssertRC(rc2);
10342 fLockRead = true;
10343
10344 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10345 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10346
10347 size_t cb = strlen(pImage->pszFilename);
10348 if (cb <= cbFilename)
10349 {
10350 strcpy(pszFilename, pImage->pszFilename);
10351 rc = VINF_SUCCESS;
10352 }
10353 else
10354 {
10355 strncpy(pszFilename, pImage->pszFilename, cbFilename - 1);
10356 pszFilename[cbFilename - 1] = '\0';
10357 rc = VERR_BUFFER_OVERFLOW;
10358 }
10359 } while (0);
10360
10361 if (RT_UNLIKELY(fLockRead))
10362 {
10363 rc2 = vdThreadFinishRead(pDisk);
10364 AssertRC(rc2);
10365 }
10366
10367 LogFlowFunc(("returns %Rrc, pszFilename=\"%s\"\n", rc, pszFilename));
10368 return rc;
10369}
10370
10371/**
10372 * Get the comment line of image in HDD container.
10373 *
10374 * @returns VBox status code.
10375 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10376 * @returns VERR_BUFFER_OVERFLOW if pszComment buffer too small to hold comment text.
10377 * @param pDisk Pointer to HDD container.
10378 * @param nImage Image number, counts from 0. 0 is always base image of container.
10379 * @param pszComment Where to store the comment string of image. NULL is ok.
10380 * @param cbComment The size of pszComment buffer. 0 is ok.
10381 */
10382VBOXDDU_DECL(int) VDGetComment(PVBOXHDD pDisk, unsigned nImage,
10383 char *pszComment, unsigned cbComment)
10384{
10385 int rc;
10386 int rc2;
10387 bool fLockRead = false;
10388
10389 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p cbComment=%u\n",
10390 pDisk, nImage, pszComment, cbComment));
10391 do
10392 {
10393 /* sanity check */
10394 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10395 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10396
10397 /* Check arguments. */
10398 AssertMsgBreakStmt(VALID_PTR(pszComment),
10399 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10400 rc = VERR_INVALID_PARAMETER);
10401 AssertMsgBreakStmt(cbComment,
10402 ("cbComment=%u\n", cbComment),
10403 rc = VERR_INVALID_PARAMETER);
10404
10405 rc2 = vdThreadStartRead(pDisk);
10406 AssertRC(rc2);
10407 fLockRead = true;
10408
10409 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10410 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10411
10412 rc = pImage->Backend->pfnGetComment(pImage->pBackendData, pszComment,
10413 cbComment);
10414 } while (0);
10415
10416 if (RT_UNLIKELY(fLockRead))
10417 {
10418 rc2 = vdThreadFinishRead(pDisk);
10419 AssertRC(rc2);
10420 }
10421
10422 LogFlowFunc(("returns %Rrc, pszComment=\"%s\"\n", rc, pszComment));
10423 return rc;
10424}
10425
10426/**
10427 * Changes the comment line of image in HDD container.
10428 *
10429 * @returns VBox status code.
10430 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10431 * @param pDisk Pointer to HDD container.
10432 * @param nImage Image number, counts from 0. 0 is always base image of container.
10433 * @param pszComment New comment string (UTF-8). NULL is allowed to reset the comment.
10434 */
10435VBOXDDU_DECL(int) VDSetComment(PVBOXHDD pDisk, unsigned nImage,
10436 const char *pszComment)
10437{
10438 int rc;
10439 int rc2;
10440 bool fLockWrite = false;
10441
10442 LogFlowFunc(("pDisk=%#p nImage=%u pszComment=%#p \"%s\"\n",
10443 pDisk, nImage, pszComment, pszComment));
10444 do
10445 {
10446 /* sanity check */
10447 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10448 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10449
10450 /* Check arguments. */
10451 AssertMsgBreakStmt(VALID_PTR(pszComment) || pszComment == NULL,
10452 ("pszComment=%#p \"%s\"\n", pszComment, pszComment),
10453 rc = VERR_INVALID_PARAMETER);
10454
10455 rc2 = vdThreadStartWrite(pDisk);
10456 AssertRC(rc2);
10457 fLockWrite = true;
10458
10459 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10460 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10461
10462 rc = pImage->Backend->pfnSetComment(pImage->pBackendData, pszComment);
10463 } while (0);
10464
10465 if (RT_UNLIKELY(fLockWrite))
10466 {
10467 rc2 = vdThreadFinishWrite(pDisk);
10468 AssertRC(rc2);
10469 }
10470
10471 LogFlowFunc(("returns %Rrc\n", rc));
10472 return rc;
10473}
10474
10475
10476/**
10477 * Get UUID of image in HDD container.
10478 *
10479 * @returns VBox status code.
10480 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10481 * @param pDisk Pointer to HDD container.
10482 * @param nImage Image number, counts from 0. 0 is always base image of container.
10483 * @param pUuid Where to store the image creation UUID.
10484 */
10485VBOXDDU_DECL(int) VDGetUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10486{
10487 int rc;
10488 int rc2;
10489 bool fLockRead = false;
10490
10491 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10492 do
10493 {
10494 /* sanity check */
10495 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10496 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10497
10498 /* Check arguments. */
10499 AssertMsgBreakStmt(VALID_PTR(pUuid),
10500 ("pUuid=%#p\n", pUuid),
10501 rc = VERR_INVALID_PARAMETER);
10502
10503 rc2 = vdThreadStartRead(pDisk);
10504 AssertRC(rc2);
10505 fLockRead = true;
10506
10507 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10508 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10509
10510 rc = pImage->Backend->pfnGetUuid(pImage->pBackendData, pUuid);
10511 } while (0);
10512
10513 if (RT_UNLIKELY(fLockRead))
10514 {
10515 rc2 = vdThreadFinishRead(pDisk);
10516 AssertRC(rc2);
10517 }
10518
10519 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10520 return rc;
10521}
10522
10523/**
10524 * Set the image's UUID. Should not be used by normal applications.
10525 *
10526 * @returns VBox status code.
10527 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10528 * @param pDisk Pointer to HDD container.
10529 * @param nImage Image number, counts from 0. 0 is always base image of container.
10530 * @param pUuid New UUID of the image. If NULL, a new UUID is created.
10531 */
10532VBOXDDU_DECL(int) VDSetUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10533{
10534 int rc;
10535 int rc2;
10536 bool fLockWrite = false;
10537
10538 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10539 pDisk, nImage, pUuid, pUuid));
10540 do
10541 {
10542 /* sanity check */
10543 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10544 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10545
10546 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10547 ("pUuid=%#p\n", pUuid),
10548 rc = VERR_INVALID_PARAMETER);
10549
10550 rc2 = vdThreadStartWrite(pDisk);
10551 AssertRC(rc2);
10552 fLockWrite = true;
10553
10554 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10555 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10556
10557 RTUUID Uuid;
10558 if (!pUuid)
10559 {
10560 RTUuidCreate(&Uuid);
10561 pUuid = &Uuid;
10562 }
10563 rc = pImage->Backend->pfnSetUuid(pImage->pBackendData, pUuid);
10564 } while (0);
10565
10566 if (RT_UNLIKELY(fLockWrite))
10567 {
10568 rc2 = vdThreadFinishWrite(pDisk);
10569 AssertRC(rc2);
10570 }
10571
10572 LogFlowFunc(("returns %Rrc\n", rc));
10573 return rc;
10574}
10575
10576/**
10577 * Get last modification UUID of image in HDD container.
10578 *
10579 * @returns VBox status code.
10580 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10581 * @param pDisk Pointer to HDD container.
10582 * @param nImage Image number, counts from 0. 0 is always base image of container.
10583 * @param pUuid Where to store the image modification UUID.
10584 */
10585VBOXDDU_DECL(int) VDGetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PRTUUID pUuid)
10586{
10587 int rc = VINF_SUCCESS;
10588 int rc2;
10589 bool fLockRead = false;
10590
10591 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10592 do
10593 {
10594 /* sanity check */
10595 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10596 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10597
10598 /* Check arguments. */
10599 AssertMsgBreakStmt(VALID_PTR(pUuid),
10600 ("pUuid=%#p\n", pUuid),
10601 rc = VERR_INVALID_PARAMETER);
10602
10603 rc2 = vdThreadStartRead(pDisk);
10604 AssertRC(rc2);
10605 fLockRead = true;
10606
10607 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10608 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10609
10610 rc = pImage->Backend->pfnGetModificationUuid(pImage->pBackendData,
10611 pUuid);
10612 } while (0);
10613
10614 if (RT_UNLIKELY(fLockRead))
10615 {
10616 rc2 = vdThreadFinishRead(pDisk);
10617 AssertRC(rc2);
10618 }
10619
10620 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10621 return rc;
10622}
10623
10624/**
10625 * Set the image's last modification UUID. Should not be used by normal applications.
10626 *
10627 * @returns VBox status code.
10628 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10629 * @param pDisk Pointer to HDD container.
10630 * @param nImage Image number, counts from 0. 0 is always base image of container.
10631 * @param pUuid New modification UUID of the image. If NULL, a new UUID is created.
10632 */
10633VBOXDDU_DECL(int) VDSetModificationUuid(PVBOXHDD pDisk, unsigned nImage, PCRTUUID pUuid)
10634{
10635 int rc;
10636 int rc2;
10637 bool fLockWrite = false;
10638
10639 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10640 pDisk, nImage, pUuid, pUuid));
10641 do
10642 {
10643 /* sanity check */
10644 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10645 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10646
10647 /* Check arguments. */
10648 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10649 ("pUuid=%#p\n", pUuid),
10650 rc = VERR_INVALID_PARAMETER);
10651
10652 rc2 = vdThreadStartWrite(pDisk);
10653 AssertRC(rc2);
10654 fLockWrite = true;
10655
10656 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10657 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10658
10659 RTUUID Uuid;
10660 if (!pUuid)
10661 {
10662 RTUuidCreate(&Uuid);
10663 pUuid = &Uuid;
10664 }
10665 rc = pImage->Backend->pfnSetModificationUuid(pImage->pBackendData,
10666 pUuid);
10667 } while (0);
10668
10669 if (RT_UNLIKELY(fLockWrite))
10670 {
10671 rc2 = vdThreadFinishWrite(pDisk);
10672 AssertRC(rc2);
10673 }
10674
10675 LogFlowFunc(("returns %Rrc\n", rc));
10676 return rc;
10677}
10678
10679/**
10680 * Get parent UUID of image in HDD container.
10681 *
10682 * @returns VBox status code.
10683 * @returns VERR_VD_IMAGE_NOT_FOUND if image with specified number was not opened.
10684 * @param pDisk Pointer to HDD container.
10685 * @param nImage Image number, counts from 0. 0 is always base image of container.
10686 * @param pUuid Where to store the parent image UUID.
10687 */
10688VBOXDDU_DECL(int) VDGetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10689 PRTUUID pUuid)
10690{
10691 int rc = VINF_SUCCESS;
10692 int rc2;
10693 bool fLockRead = false;
10694
10695 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p\n", pDisk, nImage, pUuid));
10696 do
10697 {
10698 /* sanity check */
10699 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10700 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10701
10702 /* Check arguments. */
10703 AssertMsgBreakStmt(VALID_PTR(pUuid),
10704 ("pUuid=%#p\n", pUuid),
10705 rc = VERR_INVALID_PARAMETER);
10706
10707 rc2 = vdThreadStartRead(pDisk);
10708 AssertRC(rc2);
10709 fLockRead = true;
10710
10711 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10712 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10713
10714 rc = pImage->Backend->pfnGetParentUuid(pImage->pBackendData, pUuid);
10715 } while (0);
10716
10717 if (RT_UNLIKELY(fLockRead))
10718 {
10719 rc2 = vdThreadFinishRead(pDisk);
10720 AssertRC(rc2);
10721 }
10722
10723 LogFlowFunc(("returns %Rrc, Uuid={%RTuuid}\n", rc, pUuid));
10724 return rc;
10725}
10726
10727/**
10728 * Set the image's parent UUID. Should not be used by normal applications.
10729 *
10730 * @returns VBox status code.
10731 * @param pDisk Pointer to HDD container.
10732 * @param nImage Image number, counts from 0. 0 is always base image of container.
10733 * @param pUuid New parent UUID of the image. If NULL, a new UUID is created.
10734 */
10735VBOXDDU_DECL(int) VDSetParentUuid(PVBOXHDD pDisk, unsigned nImage,
10736 PCRTUUID pUuid)
10737{
10738 int rc;
10739 int rc2;
10740 bool fLockWrite = false;
10741
10742 LogFlowFunc(("pDisk=%#p nImage=%u pUuid=%#p {%RTuuid}\n",
10743 pDisk, nImage, pUuid, pUuid));
10744 do
10745 {
10746 /* sanity check */
10747 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
10748 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10749
10750 /* Check arguments. */
10751 AssertMsgBreakStmt(VALID_PTR(pUuid) || pUuid == NULL,
10752 ("pUuid=%#p\n", pUuid),
10753 rc = VERR_INVALID_PARAMETER);
10754
10755 rc2 = vdThreadStartWrite(pDisk);
10756 AssertRC(rc2);
10757 fLockWrite = true;
10758
10759 PVDIMAGE pImage = vdGetImageByNumber(pDisk, nImage);
10760 AssertPtrBreakStmt(pImage, rc = VERR_VD_IMAGE_NOT_FOUND);
10761
10762 RTUUID Uuid;
10763 if (!pUuid)
10764 {
10765 RTUuidCreate(&Uuid);
10766 pUuid = &Uuid;
10767 }
10768 rc = pImage->Backend->pfnSetParentUuid(pImage->pBackendData, pUuid);
10769 } while (0);
10770
10771 if (RT_UNLIKELY(fLockWrite))
10772 {
10773 rc2 = vdThreadFinishWrite(pDisk);
10774 AssertRC(rc2);
10775 }
10776
10777 LogFlowFunc(("returns %Rrc\n", rc));
10778 return rc;
10779}
10780
10781
10782/**
10783 * Debug helper - dumps all opened images in HDD container into the log file.
10784 *
10785 * @param pDisk Pointer to HDD container.
10786 */
10787VBOXDDU_DECL(void) VDDumpImages(PVBOXHDD pDisk)
10788{
10789 int rc2;
10790 bool fLockRead = false;
10791
10792 do
10793 {
10794 /* sanity check */
10795 AssertPtrBreak(pDisk);
10796 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
10797
10798 if (!pDisk->pInterfaceError || !VALID_PTR(pDisk->pInterfaceError->pfnMessage))
10799 pDisk->pInterfaceError->pfnMessage = vdLogMessage;
10800
10801 rc2 = vdThreadStartRead(pDisk);
10802 AssertRC(rc2);
10803 fLockRead = true;
10804
10805 vdMessageWrapper(pDisk, "--- Dumping VD Disk, Images=%u\n", pDisk->cImages);
10806 for (PVDIMAGE pImage = pDisk->pBase; pImage; pImage = pImage->pNext)
10807 {
10808 vdMessageWrapper(pDisk, "Dumping VD image \"%s\" (Backend=%s)\n",
10809 pImage->pszFilename, pImage->Backend->pszBackendName);
10810 pImage->Backend->pfnDump(pImage->pBackendData);
10811 }
10812 } while (0);
10813
10814 if (RT_UNLIKELY(fLockRead))
10815 {
10816 rc2 = vdThreadFinishRead(pDisk);
10817 AssertRC(rc2);
10818 }
10819}
10820
10821
/**
 * Discards the given ranges on the HDD container (synchronous variant).
 *
 * Implemented on top of the asynchronous discard path: a stack-allocated
 * I/O context is initialized in sync mode with an event semaphore, and
 * vdIoCtxProcessSync() presumably waits on that semaphore (signalled via
 * vdIoCtxSyncComplete) until the request finished — NOTE(review): confirm
 * the wait semantics against vdIoCtxProcessSync.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_SUPPORTED if the top image was not opened with
 *          VD_OPEN_FLAGS_DISCARD.
 * @param   pDisk     Pointer to HDD container.
 * @param   paRanges  Array of ranges to discard; must be a valid pointer.
 * @param   cRanges   Number of entries in @a paRanges; must be non-zero.
 */
VBOXDDU_DECL(int) VDDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges)
{
    int rc;
    int rc2;
    bool fLockWrite = false;

    LogFlowFunc(("pDisk=%#p paRanges=%#p cRanges=%u\n",
                 pDisk, paRanges, cRanges));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(cRanges,
                           ("cRanges=%u\n", cRanges),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(paRanges),
                           ("paRanges=%#p\n", paRanges),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* There must be at least one opened image. */
        AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);

        /* Discard requires the top image to be opened with the discard flag. */
        AssertMsgBreakStmt(pDisk->pLast->uOpenFlags & VD_OPEN_FLAGS_DISCARD,
                           ("Discarding not supported\n"),
                           rc = VERR_NOT_SUPPORTED);

        VDIOCTX IoCtx;
        RTSEMEVENT hEventComplete = NIL_RTSEMEVENT;

        rc = RTSemEventCreate(&hEventComplete);
        if (RT_FAILURE(rc))
            break;

        /* DONT_FREE because IoCtx lives on this stack frame, not the cache. */
        vdIoCtxDiscardInit(&IoCtx, pDisk, paRanges, cRanges,
                           vdIoCtxSyncComplete, pDisk, hEventComplete, NULL,
                           vdDiscardHelperAsync, VDIOCTX_FLAGS_SYNC | VDIOCTX_FLAGS_DONT_FREE);
        rc = vdIoCtxProcessSync(&IoCtx, hEventComplete);

        RTSemEventDestroy(hEventComplete);
    } while (0);

    if (RT_UNLIKELY(fLockWrite))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
10878
10879
/**
 * Starts an asynchronous read request on the HDD container.
 *
 * The request is processed as far as possible on the calling thread; if it
 * cannot finish synchronously, VERR_VD_ASYNC_IO_IN_PROGRESS is returned and
 * @a pfnComplete will be invoked with @a pvUser1 / @a pvUser2 later.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the request was deferred and will
 *          be completed through the completion callback.
 * @param   pDisk       Pointer to HDD container.
 * @param   uOffset     Byte offset to start reading from.
 * @param   cbRead      Number of bytes to read; must be non-zero and the
 *                      range must lie within the disk size.
 * @param   pcSgBuf     S/G buffer receiving the data.
 * @param   pfnComplete Completion callback for the deferred case.
 * @param   pvUser1     Opaque user argument 1 for the callback.
 * @param   pvUser2     Opaque user argument 2 for the callback.
 */
VBOXDDU_DECL(int) VDAsyncRead(PVBOXHDD pDisk, uint64_t uOffset, size_t cbRead,
                              PCRTSGBUF pcSgBuf,
                              PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
                              void *pvUser1, void *pvUser2)
{
    int rc = VERR_VD_BLOCK_FREE;
    int rc2;
    bool fLockRead = false;
    PVDIOCTX pIoCtx = NULL;

    LogFlowFunc(("pDisk=%#p uOffset=%llu pcSgBuf=%#p cbRead=%zu pvUser1=%#p pvUser2=%#p\n",
                 pDisk, uOffset, pcSgBuf, cbRead, pvUser1, pvUser2));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(cbRead,
                           ("cbRead=%zu\n", cbRead),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
                           ("pcSgBuf=%#p\n", pcSgBuf),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartRead(pDisk);
        AssertRC(rc2);
        fLockRead = true;

        /* Range check is done under the lock since it reads pDisk->cbSize. */
        AssertMsgBreakStmt(uOffset + cbRead <= pDisk->cbSize,
                           ("uOffset=%llu cbRead=%zu pDisk->cbSize=%llu\n",
                            uOffset, cbRead, pDisk->cbSize),
                           rc = VERR_INVALID_PARAMETER);
        AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);

        /* Unallocated (free) blocks are read back as zeroes. */
        pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_READ, uOffset,
                                  cbRead, pDisk->pLast, pcSgBuf,
                                  pfnComplete, pvUser1, pvUser2,
                                  NULL, vdReadHelperAsync,
                                  VDIOCTX_FLAGS_ZERO_FREE_BLOCKS);
        if (!pIoCtx)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdIoCtxProcessTryLockDefer(pIoCtx);
        if (rc == VINF_VD_ASYNC_IO_FINISHED)
        {
            /* Finished synchronously.  Whoever flips fComplete first owns the
             * context; if the completion handler got there first, report
             * in-progress and let it finish/free the request. */
            if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
                vdIoCtxFree(pDisk, pIoCtx);
            else
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
        }
        else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
            vdIoCtxFree(pDisk, pIoCtx);

    } while (0);

    /* Keep the read lock while the request is still outstanding; it is
     * presumably released by the completion path — NOTE(review): confirm. */
    if (RT_UNLIKELY(fLockRead) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
    {
        rc2 = vdThreadFinishRead(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
10950
10951
/**
 * Starts an asynchronous write request on the HDD container.
 *
 * Counterpart to VDAsyncRead(): processes the request as far as possible on
 * the calling thread and defers the rest, signalling completion through
 * @a pfnComplete.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the request was deferred and will
 *          be completed through the completion callback.
 * @param   pDisk       Pointer to HDD container.
 * @param   uOffset     Byte offset to start writing at.
 * @param   cbWrite     Number of bytes to write; must be non-zero and the
 *                      range must lie within the disk size.
 * @param   pcSgBuf     S/G buffer holding the data to write.
 * @param   pfnComplete Completion callback for the deferred case.
 * @param   pvUser1     Opaque user argument 1 for the callback.
 * @param   pvUser2     Opaque user argument 2 for the callback.
 */
VBOXDDU_DECL(int) VDAsyncWrite(PVBOXHDD pDisk, uint64_t uOffset, size_t cbWrite,
                               PCRTSGBUF pcSgBuf,
                               PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
                               void *pvUser1, void *pvUser2)
{
    int rc;
    int rc2;
    bool fLockWrite = false;
    PVDIOCTX pIoCtx = NULL;

    LogFlowFunc(("pDisk=%#p uOffset=%llu cSgBuf=%#p cbWrite=%zu pvUser1=%#p pvUser2=%#p\n",
                 pDisk, uOffset, pcSgBuf, cbWrite, pvUser1, pvUser2));
    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        /* Check arguments. */
        AssertMsgBreakStmt(cbWrite,
                           ("cbWrite=%zu\n", cbWrite),
                           rc = VERR_INVALID_PARAMETER);
        AssertMsgBreakStmt(VALID_PTR(pcSgBuf),
                           ("pcSgBuf=%#p\n", pcSgBuf),
                           rc = VERR_INVALID_PARAMETER);

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        /* Range check is done under the lock since it reads pDisk->cbSize. */
        AssertMsgBreakStmt(uOffset + cbWrite <= pDisk->cbSize,
                           ("uOffset=%llu cbWrite=%zu pDisk->cbSize=%llu\n",
                            uOffset, cbWrite, pDisk->cbSize),
                           rc = VERR_INVALID_PARAMETER);
        AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);

        pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_WRITE, uOffset,
                                  cbWrite, pDisk->pLast, pcSgBuf,
                                  pfnComplete, pvUser1, pvUser2,
                                  NULL, vdWriteHelperAsync,
                                  VDIOCTX_FLAGS_DEFAULT);
        if (!pIoCtx)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdIoCtxProcessTryLockDefer(pIoCtx);
        if (rc == VINF_VD_ASYNC_IO_FINISHED)
        {
            /* Finished synchronously.  Whoever flips fComplete first owns the
             * context; if the completion handler got there first, report
             * in-progress and let it finish/free the request. */
            if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
                vdIoCtxFree(pDisk, pIoCtx);
            else
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
        }
        else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
            vdIoCtxFree(pDisk, pIoCtx);
    } while (0);

    /* Keep the write lock while the request is still outstanding; it is
     * presumably released by the completion path — NOTE(review): confirm. */
    if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
11020
11021
/**
 * Starts an asynchronous flush request on the HDD container.
 *
 * Flushes the top image (pDisk->pLast); on deferral the completion is
 * signalled through @a pfnComplete.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_ASYNC_IO_IN_PROGRESS if the request was deferred and will
 *          be completed through the completion callback.
 * @param   pDisk       Pointer to HDD container.
 * @param   pfnComplete Completion callback for the deferred case.
 * @param   pvUser1     Opaque user argument 1 for the callback.
 * @param   pvUser2     Opaque user argument 2 for the callback.
 */
VBOXDDU_DECL(int) VDAsyncFlush(PVBOXHDD pDisk, PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
                               void *pvUser1, void *pvUser2)
{
    int rc;
    int rc2;
    bool fLockWrite = false;
    PVDIOCTX pIoCtx = NULL;

    LogFlowFunc(("pDisk=%#p\n", pDisk));

    do
    {
        /* sanity check */
        AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
        AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));

        rc2 = vdThreadStartWrite(pDisk);
        AssertRC(rc2);
        fLockWrite = true;

        AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);

        /* A flush carries no payload: offset and size are both zero. */
        pIoCtx = vdIoCtxRootAlloc(pDisk, VDIOCTXTXDIR_FLUSH, 0,
                                  0, pDisk->pLast, NULL,
                                  pfnComplete, pvUser1, pvUser2,
                                  NULL, vdFlushHelperAsync,
                                  VDIOCTX_FLAGS_DEFAULT);
        if (!pIoCtx)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        rc = vdIoCtxProcessTryLockDefer(pIoCtx);
        if (rc == VINF_VD_ASYNC_IO_FINISHED)
        {
            /* Finished synchronously.  Whoever flips fComplete first owns the
             * context; if the completion handler got there first, report
             * in-progress and let it finish/free the request. */
            if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
                vdIoCtxFree(pDisk, pIoCtx);
            else
                rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
        }
        else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
            vdIoCtxFree(pDisk, pIoCtx);
    } while (0);

    /* Keep the write lock while the request is still outstanding; it is
     * presumably released by the completion path — NOTE(review): confirm. */
    if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
    {
        rc2 = vdThreadFinishWrite(pDisk);
        AssertRC(rc2);
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
11076
11077VBOXDDU_DECL(int) VDAsyncDiscardRanges(PVBOXHDD pDisk, PCRTRANGE paRanges, unsigned cRanges,
11078 PFNVDASYNCTRANSFERCOMPLETE pfnComplete,
11079 void *pvUser1, void *pvUser2)
11080{
11081 int rc;
11082 int rc2;
11083 bool fLockWrite = false;
11084 PVDIOCTX pIoCtx = NULL;
11085
11086 LogFlowFunc(("pDisk=%#p\n", pDisk));
11087
11088 do
11089 {
11090 /* sanity check */
11091 AssertPtrBreakStmt(pDisk, rc = VERR_INVALID_PARAMETER);
11092 AssertMsg(pDisk->u32Signature == VBOXHDDDISK_SIGNATURE, ("u32Signature=%08x\n", pDisk->u32Signature));
11093
11094 rc2 = vdThreadStartWrite(pDisk);
11095 AssertRC(rc2);
11096 fLockWrite = true;
11097
11098 AssertPtrBreakStmt(pDisk->pLast, rc = VERR_VD_NOT_OPENED);
11099
11100 pIoCtx = vdIoCtxDiscardAlloc(pDisk, paRanges, cRanges,
11101 pfnComplete, pvUser1, pvUser2, NULL,
11102 vdDiscardHelperAsync,
11103 VDIOCTX_FLAGS_DEFAULT);
11104 if (!pIoCtx)
11105 {
11106 rc = VERR_NO_MEMORY;
11107 break;
11108 }
11109
11110 rc = vdIoCtxProcessTryLockDefer(pIoCtx);
11111 if (rc == VINF_VD_ASYNC_IO_FINISHED)
11112 {
11113 if (ASMAtomicCmpXchgBool(&pIoCtx->fComplete, true, false))
11114 vdIoCtxFree(pDisk, pIoCtx);
11115 else
11116 rc = VERR_VD_ASYNC_IO_IN_PROGRESS; /* Let the other handler complete the request. */
11117 }
11118 else if (rc != VERR_VD_ASYNC_IO_IN_PROGRESS) /* Another error */
11119 vdIoCtxFree(pDisk, pIoCtx);
11120 } while (0);
11121
11122 if (RT_UNLIKELY(fLockWrite) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
11123 {
11124 rc2 = vdThreadFinishWrite(pDisk);
11125 AssertRC(rc2);
11126 }
11127
11128 LogFlowFunc(("returns %Rrc\n", rc));
11129 return rc;
11130}
11131
/**
 * Tries to repair a possibly corrupted image.
 *
 * Builds an internal I/O interface out of the "limited" wrappers (which only
 * need a plain VDINTERFACEIO underneath, no open container), using either the
 * caller supplied I/O interface or a fallback based on the native file API,
 * and then delegates the actual repair to the backend's pfnRepair method.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED if the backend does not
 *          implement pfnRepair.
 * @param   pVDIfsDisk   Pointer to the per-disk VD interface list.
 * @param   pVDIfsImage  Pointer to the per-image VD interface list; must not
 *                       already contain an internal I/O (IOINT) interface.
 * @param   pszFilename  Name of the image file to repair.
 * @param   pszBackend   Name of the backend to use.
 * @param   fFlags       Combination of the VD_REPAIR_* flags.
 */
VBOXDDU_DECL(int) VDRepair(PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                           const char *pszFilename, const char *pszBackend,
                           uint32_t fFlags)
{
    int rc = VERR_NOT_SUPPORTED; /* overwritten below by VDInterfaceAdd/vdFindBackend */
    PCVDIMAGEBACKEND pBackend = NULL;
    VDINTERFACEIOINT VDIfIoInt;
    VDINTERFACEIO VDIfIoFallback;
    PVDINTERFACEIO pInterfaceIo;

    LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
    /* Check arguments. */
    AssertMsgReturn(VALID_PTR(pszFilename) && *pszFilename,
                    ("pszFilename=%#p \"%s\"\n", pszFilename, pszFilename),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pszBackend),
                    ("pszBackend=%#p\n", pszBackend),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn((fFlags & ~VD_REPAIR_FLAGS_MASK) == 0,
                    ("fFlags=%#x\n", fFlags),
                    VERR_INVALID_PARAMETER);

    pInterfaceIo = VDIfIoGet(pVDIfsImage);
    if (!pInterfaceIo)
    {
        /*
         * Caller doesn't provide an I/O interface, create our own using the
         * native file API.
         */
        vdIfIoFallbackCallbacksSetup(&VDIfIoFallback);
        pInterfaceIo = &VDIfIoFallback;
    }

    /* Set up the internal I/O interface. */
    AssertReturn(!VDIfIoIntGet(pVDIfsImage), VERR_INVALID_PARAMETER);
    VDIfIoInt.pfnOpen = vdIOIntOpenLimited;
    VDIfIoInt.pfnClose = vdIOIntCloseLimited;
    VDIfIoInt.pfnDelete = vdIOIntDeleteLimited;
    VDIfIoInt.pfnMove = vdIOIntMoveLimited;
    VDIfIoInt.pfnGetFreeSpace = vdIOIntGetFreeSpaceLimited;
    VDIfIoInt.pfnGetModificationTime = vdIOIntGetModificationTimeLimited;
    VDIfIoInt.pfnGetSize = vdIOIntGetSizeLimited;
    VDIfIoInt.pfnSetSize = vdIOIntSetSizeLimited;
    VDIfIoInt.pfnReadUser = vdIOIntReadUserLimited;
    VDIfIoInt.pfnWriteUser = vdIOIntWriteUserLimited;
    VDIfIoInt.pfnReadMeta = vdIOIntReadMetaLimited;
    VDIfIoInt.pfnWriteMeta = vdIOIntWriteMetaLimited;
    VDIfIoInt.pfnFlush = vdIOIntFlushLimited;
    /* Link the internal interface into the image interface list; the chosen
     * pInterfaceIo becomes its user context. */
    rc = VDInterfaceAdd(&VDIfIoInt.Core, "VD_IOINT", VDINTERFACETYPE_IOINT,
                        pInterfaceIo, sizeof(VDINTERFACEIOINT), &pVDIfsImage);
    AssertRC(rc);

    rc = vdFindBackend(pszBackend, &pBackend);
    if (RT_SUCCESS(rc))
    {
        if (pBackend->pfnRepair)
            rc = pBackend->pfnRepair(pszFilename, pVDIfsDisk, pVDIfsImage, fFlags);
        else
            rc = VERR_VD_IMAGE_REPAIR_NOT_SUPPORTED;
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
11196
11197
11198/*
11199 * generic plugin functions
11200 */
11201
11202/**
11203 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeLocation}
11204 */
11205DECLCALLBACK(int) genericFileComposeLocation(PVDINTERFACE pConfig, char **pszLocation)
11206{
11207 RT_NOREF1(pConfig);
11208 *pszLocation = NULL;
11209 return VINF_SUCCESS;
11210}
11211
11212/**
11213 * @interface_method_impl{VDIMAGEBACKEND,pfnComposeName}
11214 */
11215DECLCALLBACK(int) genericFileComposeName(PVDINTERFACE pConfig, char **pszName)
11216{
11217 RT_NOREF1(pConfig);
11218 *pszName = NULL;
11219 return VINF_SUCCESS;
11220}
11221
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette