VirtualBox

source: vbox/trunk/src/VBox/Storage/QED.cpp@ 63942

Last change on this file since 63942 was 63905, checked in by vboxsync, 8 years ago

Storage/VD: Add proper versioning of the backend structures instead of just relying on the structure size to make changing callback signatures possible in the future and still being able to reject incompatible plugins

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 88.0 KB
Line 
1/* $Id: QED.cpp 63905 2016-09-20 08:31:05Z vboxsync $ */
2/** @file
3 * QED - QED Disk image.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD_QED
23#include <VBox/vd-plugin.h>
24#include <VBox/err.h>
25
26#include <VBox/log.h>
27#include <iprt/asm.h>
28#include <iprt/assert.h>
29#include <iprt/string.h>
30#include <iprt/alloc.h>
31#include <iprt/path.h>
32#include <iprt/list.h>
33
34#include "VDBackends.h"
35
36/**
37 * The QED backend implements support for the qemu enhanced disk format (short QED)
38 * The specification for the format is available under http://wiki.qemu.org/Features/QED/Specification
39 *
40 * Missing things to implement:
41 * - compaction
42 * - resizing which requires block relocation (very rare case)
43 */
44
45
46/*********************************************************************************************************************************
47* Structures in a QED image, little endian *
48*********************************************************************************************************************************/
49
#pragma pack(1)
/**
 * On-disk QED image header.  All fields are stored little endian;
 * qedHdrConvertToHostEndianess()/qedHdrConvertFromHostEndianess() do the
 * conversion.  Packed to match the on-disk layout exactly.
 */
typedef struct QedHeader
{
    /** Magic value (QED_MAGIC). */
    uint32_t u32Magic;
    /** Cluster size in bytes. */
    uint32_t u32ClusterSize;
    /** Size of L1 and L2 tables in clusters. */
    uint32_t u32TableSize;
    /** Size of this header structure in clusters. */
    uint32_t u32HeaderSize;
    /** Features used for the image (QED_FEATURE_*). */
    uint64_t u64FeatureFlags;
    /** Compatibility features used for the image. */
    uint64_t u64CompatFeatureFlags;
    /** Self resetting feature bits. */
    uint64_t u64AutoresetFeatureFlags;
    /** Offset of the L1 table in bytes. */
    uint64_t u64OffL1Table;
    /** Logical image size as seen by the guest. */
    uint64_t u64Size;
    /** Offset of the backing filename in bytes. */
    uint32_t u32OffBackingFilename;
    /** Size of the backing filename. */
    uint32_t u32BackingFilenameSize;
} QedHeader;
#pragma pack()
/** Pointer to an on-disk QED header. */
typedef QedHeader *PQedHeader;
79
/** QED magic value ("QED\0" read as a little endian 32-bit value). */
#define QED_MAGIC                            UINT32_C(0x00444551) /* QED\0 */
/** Cluster size minimum (4 KiB). */
#define QED_CLUSTER_SIZE_MIN                 RT_BIT(12)
/** Cluster size maximum (64 MiB). */
#define QED_CLUSTER_SIZE_MAX                 RT_BIT(26)
/** L1 and L2 table size minimum, in clusters. */
#define QED_TABLE_SIZE_MIN                   1
/** L1 and L2 table size maximum, in clusters. */
#define QED_TABLE_SIZE_MAX                   16

/** QED default cluster size when creating an image. */
#define QED_CLUSTER_SIZE_DEFAULT             (64 * _1K)
/** The default table size in clusters. */
#define QED_TABLE_SIZE_DEFAULT               4

/** Feature flags.
 * @{
 */
/** Image uses a backing file to provide data for unallocated clusters. */
#define QED_FEATURE_BACKING_FILE             RT_BIT_64(0)
/** Image needs checking before use. */
#define QED_FEATURE_NEED_CHECK               RT_BIT_64(1)
/** Don't probe for format of the backing file, treat as raw image. */
#define QED_FEATURE_BACKING_FILE_NO_PROBE    RT_BIT_64(2)
/** Mask of valid features.  Unknown feature bits imply the image must not be
 * modified (and per the QED spec should not be opened read/write). */
#define QED_FEATURE_MASK (QED_FEATURE_BACKING_FILE | QED_FEATURE_NEED_CHECK | QED_FEATURE_BACKING_FILE_NO_PROBE)
/** @} */

/** Compatibility feature flags.
 * @{
 */
/** Mask of valid compatibility features (none defined yet). */
#define QED_COMPAT_FEATURE_MASK (0)
/** @} */

/** Autoreset feature flags.
 * @{
 */
/** Mask of valid autoreset features (none defined yet). */
#define QED_AUTORESET_FEATURE_MASK (0)
/** @} */
122
123
124/*********************************************************************************************************************************
125* Constants And Macros, Structures and Typedefs *
126*********************************************************************************************************************************/
127
/**
 * QED L2 cache entry.
 *
 * An entry lives on two intrusive lists at once: the search list (ordered by
 * table offset, used for lookups) and the LRU list (used for eviction).
 */
typedef struct QEDL2CACHEENTRY
{
    /** List node for the search list. */
    RTLISTNODE              NodeSearch;
    /** List node for the LRU list. */
    RTLISTNODE              NodeLru;
    /** Reference counter; the entry may only be evicted/freed when 0. */
    uint32_t                cRefs;
    /** The offset of the L2 table in the image, used as search key. */
    uint64_t                offL2Tbl;
    /** Pointer to the cached L2 table (cbTable bytes, page allocated). */
    uint64_t               *paL2Tbl;
} QEDL2CACHEENTRY, *PQEDL2CACHEENTRY;

/** Maximum amount of memory the L2 table cache is allowed to use. */
#define QED_L2_CACHE_MEMORY_MAX (2*_1M)
147
148/**
149 * QED image data structure.
150 */
/**
 * QED image instance data.  One per open image.
 */
typedef struct QEDIMAGE
{
    /** Image name. */
    const char          *pszFilename;
    /** Storage handle. */
    PVDIOSTORAGE        pStorage;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE        pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE        pVDIfsImage;
    /** Error interface. */
    PVDINTERFACEERROR   pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT   pIfIo;

    /** Open flags passed by VBoxHD layer. */
    unsigned            uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned            uImageFlags;
    /** Total size of the image (logical size seen by the guest). */
    uint64_t            cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY          PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY          LCHSGeometry;

    /** Filename of the backing file if any. */
    char                *pszBackingFilename;
    /** Offset of the filename in the image. */
    uint32_t            offBackingFilename;
    /** Size of the backing filename excluding \0. */
    uint32_t            cbBackingFilename;

    /** Size of the image file, always a multiple of the cluster size. */
    uint64_t            cbImage;
    /** Cluster size in bytes. */
    uint32_t            cbCluster;
    /** Number of entries in the L1 and L2 table. */
    uint32_t            cTableEntries;
    /** Size of an L1 or L2 table rounded to the next cluster size. */
    uint32_t            cbTable;
    /** Pointer to the L1 table (kept in memory for the whole session). */
    uint64_t            *paL1Table;
    /** Offset of the L1 table in the image. */
    uint64_t            offL1Table;

    /** Offset mask for a cluster (cbCluster - 1). */
    uint64_t            fOffsetMask;
    /** L1 table mask to get the L1 index from a logical offset. */
    uint64_t            fL1Mask;
    /** Number of bits to shift to get the L1 index. */
    uint32_t            cL1Shift;
    /** L2 table mask to get the L2 index from a logical offset. */
    uint64_t            fL2Mask;
    /** Number of bits to shift to get the L2 index. */
    uint32_t            cL2Shift;

    /** Memory currently occupied by the L2 table cache. */
    size_t              cbL2Cache;
    /** The sorted L2 entry list used for searching. */
    RTLISTNODE          ListSearch;
    /** The LRU L2 entry list used for eviction. */
    RTLISTNODE          ListLru;

} QEDIMAGE, *PQEDIMAGE;
217
218/**
219 * State of the async cluster allocation.
220 */
/**
 * State of the async cluster allocation state machine.
 */
typedef enum QEDCLUSTERASYNCALLOCSTATE
{
    /** Invalid. */
    QEDCLUSTERASYNCALLOCSTATE_INVALID = 0,
    /** L2 table allocation. */
    QEDCLUSTERASYNCALLOCSTATE_L2_ALLOC,
    /** Link L2 table into L1. */
    QEDCLUSTERASYNCALLOCSTATE_L2_LINK,
    /** Allocate user data cluster. */
    QEDCLUSTERASYNCALLOCSTATE_USER_ALLOC,
    /** Link user data cluster. */
    QEDCLUSTERASYNCALLOCSTATE_USER_LINK,
    /** 32bit blowup (forces the enum to 32 bits). */
    QEDCLUSTERASYNCALLOCSTATE_32BIT_HACK = 0x7fffffff
} QEDCLUSTERASYNCALLOCSTATE, *PQEDCLUSTERASYNCALLOCSTATE;
236
237/**
238 * Data needed to track async cluster allocation.
239 */
/**
 * Data needed to track an in-flight async cluster allocation.
 */
typedef struct QEDCLUSTERASYNCALLOC
{
    /** The state of the cluster allocation. */
    QEDCLUSTERASYNCALLOCSTATE enmAllocState;
    /** Old image size to roll back to in case of an error. */
    uint64_t                  cbImageOld;
    /** L1 index to link if any (only for L2 table allocations). */
    uint32_t                  idxL1;
    /** L2 index to link, required in any case. */
    uint32_t                  idxL2;
    /** Start offset of the newly allocated cluster in the image. */
    uint64_t                  offClusterNew;
    /** L2 cache entry if a L2 table is allocated. */
    PQEDL2CACHEENTRY          pL2Entry;
    /** Number of bytes to write. */
    size_t                    cbToWrite;
} QEDCLUSTERASYNCALLOC, *PQEDCLUSTERASYNCALLOC;
257
258
259/*********************************************************************************************************************************
260* Static Variables *
261*********************************************************************************************************************************/
262
/** NULL-terminated array of supported file extensions for the QED backend. */
static const VDFILEEXTENSION s_aQedFileExtensions[] =
{
    {"qed", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}
};
269
270
271/*********************************************************************************************************************************
272* Internal Functions *
273*********************************************************************************************************************************/
274
275/**
276 * Converts the image header to the host endianess and performs basic checks.
277 *
278 * @returns Whether the given header is valid or not.
279 * @param pHeader Pointer to the header to convert.
280 */
281static bool qedHdrConvertToHostEndianess(PQedHeader pHeader)
282{
283 pHeader->u32Magic = RT_LE2H_U32(pHeader->u32Magic);
284 pHeader->u32ClusterSize = RT_LE2H_U32(pHeader->u32ClusterSize);
285 pHeader->u32TableSize = RT_LE2H_U32(pHeader->u32TableSize);
286 pHeader->u32HeaderSize = RT_LE2H_U32(pHeader->u32HeaderSize);
287 pHeader->u64FeatureFlags = RT_LE2H_U64(pHeader->u64FeatureFlags);
288 pHeader->u64CompatFeatureFlags = RT_LE2H_U64(pHeader->u64CompatFeatureFlags);
289 pHeader->u64AutoresetFeatureFlags = RT_LE2H_U64(pHeader->u64AutoresetFeatureFlags);
290 pHeader->u64OffL1Table = RT_LE2H_U64(pHeader->u64OffL1Table);
291 pHeader->u64Size = RT_LE2H_U64(pHeader->u64Size);
292 pHeader->u32OffBackingFilename = RT_LE2H_U32(pHeader->u32OffBackingFilename);
293 pHeader->u32BackingFilenameSize = RT_LE2H_U32(pHeader->u32BackingFilenameSize);
294
295 if (RT_UNLIKELY(pHeader->u32Magic != QED_MAGIC))
296 return false;
297 if (RT_UNLIKELY( pHeader->u32ClusterSize < QED_CLUSTER_SIZE_MIN
298 || pHeader->u32ClusterSize > QED_CLUSTER_SIZE_MAX))
299 return false;
300 if (RT_UNLIKELY( pHeader->u32TableSize < QED_TABLE_SIZE_MIN
301 || pHeader->u32TableSize > QED_TABLE_SIZE_MAX))
302 return false;
303 if (RT_UNLIKELY(pHeader->u64Size % 512 != 0))
304 return false;
305 if (RT_UNLIKELY( pHeader->u64FeatureFlags & QED_FEATURE_BACKING_FILE
306 && ( pHeader->u32BackingFilenameSize == 0
307 || pHeader->u32BackingFilenameSize == UINT32_MAX)))
308 return false;
309
310 return true;
311}
312
313/**
314 * Creates a QED header from the given image state.
315 *
316 * @returns nothing.
317 * @param pImage Image instance data.
318 * @param pHeader Pointer to the header to convert.
319 */
320static void qedHdrConvertFromHostEndianess(PQEDIMAGE pImage, PQedHeader pHeader)
321{
322 pHeader->u32Magic = RT_H2LE_U32(QED_MAGIC);
323 pHeader->u32ClusterSize = RT_H2LE_U32(pImage->cbCluster);
324 pHeader->u32TableSize = RT_H2LE_U32(pImage->cbTable / pImage->cbCluster);
325 pHeader->u32HeaderSize = RT_H2LE_U32(1);
326 pHeader->u64FeatureFlags = RT_H2LE_U64(pImage->pszBackingFilename ? QED_FEATURE_BACKING_FILE : UINT64_C(0));
327 pHeader->u64CompatFeatureFlags = RT_H2LE_U64(UINT64_C(0));
328 pHeader->u64AutoresetFeatureFlags = RT_H2LE_U64(UINT64_C(0));
329 pHeader->u64OffL1Table = RT_H2LE_U64(pImage->offL1Table);
330 pHeader->u64Size = RT_H2LE_U64(pImage->cbSize);
331 pHeader->u32OffBackingFilename = RT_H2LE_U32(pImage->offBackingFilename);
332 pHeader->u32BackingFilenameSize = RT_H2LE_U32(pImage->cbBackingFilename);
333}
334
335/**
336 * Convert table entries from little endian to host endianess.
337 *
338 * @returns nothing.
339 * @param paTbl Pointer to the table.
340 * @param cEntries Number of entries in the table.
341 */
342static void qedTableConvertToHostEndianess(uint64_t *paTbl, uint32_t cEntries)
343{
344 while(cEntries-- > 0)
345 {
346 *paTbl = RT_LE2H_U64(*paTbl);
347 paTbl++;
348 }
349}
350
#if defined(RT_BIG_ENDIAN)
/**
 * Converts table entries from host to little endian format (out of place).
 * Only needed on big endian hosts; little endian hosts write tables directly.
 *
 * @returns nothing.
 * @param   paTblImg    Destination buffer receiving the little endian table.
 * @param   paTbl       The source table to convert.
 * @param   cEntries    Number of entries in the table.
 */
static void qedTableConvertFromHostEndianess(uint64_t *paTblImg, uint64_t *paTbl,
                                             uint32_t cEntries)
{
    for (uint32_t i = 0; i < cEntries; i++)
        paTblImg[i] = RT_H2LE_U64(paTbl[i]);
}
#endif
371
372/**
373 * Creates the L2 table cache.
374 *
375 * @returns VBox status code.
376 * @param pImage The image instance data.
377 */
378static int qedL2TblCacheCreate(PQEDIMAGE pImage)
379{
380 pImage->cbL2Cache = 0;
381 RTListInit(&pImage->ListSearch);
382 RTListInit(&pImage->ListLru);
383
384 return VINF_SUCCESS;
385}
386
387/**
388 * Destroys the L2 table cache.
389 *
390 * @returns nothing.
391 * @param pImage The image instance data.
392 */
393static void qedL2TblCacheDestroy(PQEDIMAGE pImage)
394{
395 PQEDL2CACHEENTRY pL2Entry = NULL;
396 PQEDL2CACHEENTRY pL2Next = NULL;
397
398 RTListForEachSafe(&pImage->ListSearch, pL2Entry, pL2Next, QEDL2CACHEENTRY, NodeSearch)
399 {
400 Assert(!pL2Entry->cRefs);
401
402 RTListNodeRemove(&pL2Entry->NodeSearch);
403 RTMemPageFree(pL2Entry->paL2Tbl, pImage->cbTable);
404 RTMemFree(pL2Entry);
405 }
406
407 pImage->cbL2Cache = 0;
408 RTListInit(&pImage->ListSearch);
409 RTListInit(&pImage->ListLru);
410}
411
412/**
413 * Returns the L2 table matching the given offset or NULL if none could be found.
414 *
415 * @returns Pointer to the L2 table cache entry or NULL.
416 * @param pImage The image instance data.
417 * @param offL2Tbl Offset of the L2 table to search for.
418 */
419static PQEDL2CACHEENTRY qedL2TblCacheRetain(PQEDIMAGE pImage, uint64_t offL2Tbl)
420{
421 PQEDL2CACHEENTRY pL2Entry = NULL;
422
423 RTListForEach(&pImage->ListSearch, pL2Entry, QEDL2CACHEENTRY, NodeSearch)
424 {
425 if (pL2Entry->offL2Tbl == offL2Tbl)
426 break;
427 }
428
429 if (!RTListNodeIsDummy(&pImage->ListSearch, pL2Entry, QEDL2CACHEENTRY, NodeSearch))
430 {
431 /* Update LRU list. */
432 RTListNodeRemove(&pL2Entry->NodeLru);
433 RTListPrepend(&pImage->ListLru, &pL2Entry->NodeLru);
434 pL2Entry->cRefs++;
435 return pL2Entry;
436 }
437 else
438 return NULL;
439}
440
441/**
442 * Releases a L2 table cache entry.
443 *
444 * @returns nothing.
445 * @param pL2Entry The L2 cache entry.
446 */
447static void qedL2TblCacheEntryRelease(PQEDL2CACHEENTRY pL2Entry)
448{
449 Assert(pL2Entry->cRefs > 0);
450 pL2Entry->cRefs--;
451}
452
453/**
454 * Allocates a new L2 table from the cache evicting old entries if required.
455 *
456 * @returns Pointer to the L2 cache entry or NULL.
457 * @param pImage The image instance data.
458 */
459static PQEDL2CACHEENTRY qedL2TblCacheEntryAlloc(PQEDIMAGE pImage)
460{
461 PQEDL2CACHEENTRY pL2Entry = NULL;
462
463 if (pImage->cbL2Cache + pImage->cbTable <= QED_L2_CACHE_MEMORY_MAX)
464 {
465 /* Add a new entry. */
466 pL2Entry = (PQEDL2CACHEENTRY)RTMemAllocZ(sizeof(QEDL2CACHEENTRY));
467 if (pL2Entry)
468 {
469 pL2Entry->paL2Tbl = (uint64_t *)RTMemPageAllocZ(pImage->cbTable);
470 if (RT_UNLIKELY(!pL2Entry->paL2Tbl))
471 {
472 RTMemFree(pL2Entry);
473 pL2Entry = NULL;
474 }
475 else
476 {
477 pL2Entry->cRefs = 1;
478 pImage->cbL2Cache += pImage->cbTable;
479 }
480 }
481 }
482 else
483 {
484 /* Evict the last not in use entry and use it */
485 Assert(!RTListIsEmpty(&pImage->ListLru));
486
487 RTListForEachReverse(&pImage->ListLru, pL2Entry, QEDL2CACHEENTRY, NodeLru)
488 {
489 if (!pL2Entry->cRefs)
490 break;
491 }
492
493 if (!RTListNodeIsDummy(&pImage->ListSearch, pL2Entry, QEDL2CACHEENTRY, NodeSearch))
494 {
495 RTListNodeRemove(&pL2Entry->NodeSearch);
496 RTListNodeRemove(&pL2Entry->NodeLru);
497 pL2Entry->offL2Tbl = 0;
498 pL2Entry->cRefs = 1;
499 }
500 else
501 pL2Entry = NULL;
502 }
503
504 return pL2Entry;
505}
506
507/**
508 * Frees a L2 table cache entry.
509 *
510 * @returns nothing.
511 * @param pImage The image instance data.
512 * @param pL2Entry The L2 cache entry to free.
513 */
514static void qedL2TblCacheEntryFree(PQEDIMAGE pImage, PQEDL2CACHEENTRY pL2Entry)
515{
516 Assert(!pL2Entry->cRefs);
517 RTMemPageFree(pL2Entry->paL2Tbl, pImage->cbTable);
518 RTMemFree(pL2Entry);
519
520 pImage->cbL2Cache -= pImage->cbTable;
521}
522
/**
 * Inserts an entry into the L2 table cache: at the front of the LRU list and
 * into the search list.
 *
 * The entry must have a valid (non-zero) table offset and must not already
 * be on either list.
 *
 * @returns nothing.
 * @param   pImage      The image instance data.
 * @param   pL2Entry    The L2 cache entry to insert.
 */
static void qedL2TblCacheEntryInsert(PQEDIMAGE pImage, PQEDL2CACHEENTRY pL2Entry)
{
    PQEDL2CACHEENTRY pIt = NULL;

    Assert(pL2Entry->offL2Tbl > 0);

    /* Insert at the top of the LRU list. */
    RTListPrepend(&pImage->ListLru, &pL2Entry->NodeLru);

    if (RTListIsEmpty(&pImage->ListSearch))
    {
        RTListAppend(&pImage->ListSearch, &pL2Entry->NodeSearch);
    }
    else
    {
        /* Insert into search list. */
        pIt = RTListGetFirst(&pImage->ListSearch, QEDL2CACHEENTRY, NodeSearch);
        if (pIt->offL2Tbl > pL2Entry->offL2Tbl)
            RTListPrepend(&pImage->ListSearch, &pL2Entry->NodeSearch);
        else
        {
            bool fInserted = false;

            /* NOTE(review): scanning from the head, this inserts after the
             * FIRST entry whose offset is smaller than the new one - for an
             * ascending list that is the head itself, so strict ascending
             * order is not actually maintained beyond two entries.  Lookup
             * (qedL2TblCacheRetain) is a plain linear scan, so correctness is
             * unaffected; confirm whether sorted order is really intended. */
            RTListForEach(&pImage->ListSearch, pIt, QEDL2CACHEENTRY, NodeSearch)
            {
                Assert(pIt->offL2Tbl != pL2Entry->offL2Tbl);
                if (pIt->offL2Tbl < pL2Entry->offL2Tbl)
                {
                    RTListNodeInsertAfter(&pIt->NodeSearch, &pL2Entry->NodeSearch);
                    fInserted = true;
                    break;
                }
            }
            Assert(fInserted);
        }
    }
}
567
/**
 * Fetches the L2 table at the given offset, trying the LRU cache first and
 * reading it from the image after a cache miss - version for async I/O.
 *
 * On success the returned entry is referenced; the caller must drop the
 * reference with qedL2TblCacheEntryRelease() when done.
 *
 * @returns VBox status code.  May return VERR_VD_ASYNC_IO_IN_PROGRESS /
 *          VERR_VD_NOT_ENOUGH_METADATA from the metadata read path depending
 *          on the I/O context (propagated from vdIfIoIntFileReadMeta).
 * @param   pImage       Image instance data.
 * @param   pIoCtx       The I/O context.
 * @param   offL2Tbl     The offset of the L2 table in the image.
 * @param   ppL2Entry    Where to store the L2 table entry on success.
 */
static int qedL2TblCacheFetchAsync(PQEDIMAGE pImage, PVDIOCTX pIoCtx,
                                   uint64_t offL2Tbl, PQEDL2CACHEENTRY *ppL2Entry)
{
    int rc = VINF_SUCCESS;

    /* Try to fetch the L2 table from the cache first. */
    PQEDL2CACHEENTRY pL2Entry = qedL2TblCacheRetain(pImage, offL2Tbl);
    if (!pL2Entry)
    {
        /* Cache miss: get a (new or recycled) entry and fill it from disk. */
        pL2Entry = qedL2TblCacheEntryAlloc(pImage);

        if (pL2Entry)
        {
            /* Read from the image. */
            PVDMETAXFER pMetaXfer;

            pL2Entry->offL2Tbl = offL2Tbl;
            rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pImage->pStorage,
                                       offL2Tbl, pL2Entry->paL2Tbl,
                                       pImage->cbTable, pIoCtx,
                                       &pMetaXfer, NULL, NULL);
            if (RT_SUCCESS(rc))
            {
                vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
#if defined(RT_BIG_ENDIAN)
                /* On-disk tables are little endian; convert in place. */
                qedTableConvertToHostEndianess(pL2Entry->paL2Tbl, pImage->cTableEntries);
#endif
                qedL2TblCacheEntryInsert(pImage, pL2Entry);
            }
            else
            {
                /* Read failed: drop our reference and free the unused entry. */
                qedL2TblCacheEntryRelease(pL2Entry);
                qedL2TblCacheEntryFree(pImage, pL2Entry);
            }
        }
        else
            rc = VERR_NO_MEMORY;
    }

    if (RT_SUCCESS(rc))
        *ppL2Entry = pL2Entry;

    return rc;
}
622
/**
 * Returns the base-2 logarithm of the given number, or 0 if it is not a
 * power of two (note: 1 == 2^0 also yields 0, indistinguishable from the
 * error case - callers only pass values >= 2).
 *
 * @returns The power of 2 or 0 if the given number is not a power of 2.
 * @param   u32    The number.
 */
static uint32_t qedGetPowerOfTwo(uint32_t u32)
{
    /* A power of two has exactly one bit set; x & (x - 1) clears the lowest one. */
    if (u32 == 0 || (u32 & (u32 - 1)) != 0)
        return 0;

    uint32_t cShift = 0;
    while (u32 > 1)
    {
        u32 >>= 1;
        cShift++;
    }
    return cShift;
}
641
642/**
643 * Sets the L1, L2 and offset bitmasks and L1 and L2 bit shift members.
644 *
645 * @returns nothing.
646 * @param pImage The image instance data.
647 */
648static void qedTableMasksInit(PQEDIMAGE pImage)
649{
650 uint32_t cClusterBits, cTableBits;
651
652 cClusterBits = qedGetPowerOfTwo(pImage->cbCluster);
653 cTableBits = qedGetPowerOfTwo(pImage->cTableEntries);
654
655 Assert(cClusterBits + 2 * cTableBits <= 64);
656
657 pImage->fOffsetMask = ((uint64_t)pImage->cbCluster - 1);
658 pImage->fL2Mask = ((uint64_t)pImage->cTableEntries - 1) << cClusterBits;
659 pImage->cL2Shift = cClusterBits;
660 pImage->fL1Mask = ((uint64_t)pImage->cTableEntries - 1) << (cClusterBits + cTableBits);
661 pImage->cL1Shift = cClusterBits + cTableBits;
662}
663
664/**
665 * Converts a given logical offset into the
666 *
667 * @returns nothing.
668 * @param pImage The image instance data.
669 * @param off The logical offset to convert.
670 * @param pidxL1 Where to store the index in the L1 table on success.
671 * @param pidxL2 Where to store the index in the L2 table on success.
672 * @param poffCluster Where to store the offset in the cluster on success.
673 */
674DECLINLINE(void) qedConvertLogicalOffset(PQEDIMAGE pImage, uint64_t off, uint32_t *pidxL1,
675 uint32_t *pidxL2, uint32_t *poffCluster)
676{
677 AssertPtr(pidxL1);
678 AssertPtr(pidxL2);
679 AssertPtr(poffCluster);
680
681 *poffCluster = off & pImage->fOffsetMask;
682 *pidxL1 = (off & pImage->fL1Mask) >> pImage->cL1Shift;
683 *pidxL2 = (off & pImage->fL2Mask) >> pImage->cL2Shift;
684}
685
686/**
687 * Converts Cluster size to a byte size.
688 *
689 * @returns Number of bytes derived from the given number of clusters.
690 * @param pImage The image instance data.
691 * @param cClusters The clusters to convert.
692 */
693DECLINLINE(uint64_t) qedCluster2Byte(PQEDIMAGE pImage, uint64_t cClusters)
694{
695 return cClusters * pImage->cbCluster;
696}
697
698/**
699 * Converts number of bytes to cluster size rounding to the next cluster.
700 *
701 * @returns Number of bytes derived from the given number of clusters.
702 * @param pImage The image instance data.
703 * @param cb Number of bytes to convert.
704 */
705DECLINLINE(uint64_t) qedByte2Cluster(PQEDIMAGE pImage, uint64_t cb)
706{
707 return cb / pImage->cbCluster + (cb % pImage->cbCluster ? 1 : 0);
708}
709
710/**
711 * Allocates a new cluster in the image.
712 *
713 * @returns The start offset of the new cluster in the image.
714 * @param pImage The image instance data.
715 * @param cCLusters Number of clusters to allocate.
716 */
717DECLINLINE(uint64_t) qedClusterAllocate(PQEDIMAGE pImage, uint32_t cClusters)
718{
719 uint64_t offCluster;
720
721 offCluster = pImage->cbImage;
722 pImage->cbImage += cClusters*pImage->cbCluster;
723
724 return offCluster;
725}
726
/**
 * Returns the real image offset for a given cluster or an error if the cluster is not
 * yet allocated.
 *
 * @returns VBox status code.
 *          VERR_VD_BLOCK_FREE if the cluster is not yet allocated.
 * @param   pImage        The image instance data.
 * @param   pIoCtx        The I/O context.
 * @param   idxL1         The L1 index.
 * @param   idxL2         The L2 index.
 * @param   offCluster    Offset inside the cluster.
 * @param   poffImage     Where to store the image offset on success; untouched
 *                        on failure.
 */
static int qedConvertToImageOffset(PQEDIMAGE pImage, PVDIOCTX pIoCtx,
                                   uint32_t idxL1, uint32_t idxL2,
                                   uint32_t offCluster, uint64_t *poffImage)
{
    int rc = VERR_VD_BLOCK_FREE;

    AssertReturn(idxL1 < pImage->cTableEntries, VERR_INVALID_PARAMETER);
    AssertReturn(idxL2 < pImage->cTableEntries, VERR_INVALID_PARAMETER);

    /* A zero L1 entry means no L2 table exists -> whole range unallocated. */
    if (pImage->paL1Table[idxL1])
    {
        PQEDL2CACHEENTRY pL2Entry;

        /* May issue async metadata I/O through the given context. */
        rc = qedL2TblCacheFetchAsync(pImage, pIoCtx, pImage->paL1Table[idxL1],
                                     &pL2Entry);
        if (RT_SUCCESS(rc))
        {
            /* Get real file offset; a zero L2 entry means the cluster is free. */
            if (pL2Entry->paL2Tbl[idxL2])
                *poffImage = pL2Entry->paL2Tbl[idxL2] + offCluster;
            else
                rc = VERR_VD_BLOCK_FREE;

            qedL2TblCacheEntryRelease(pL2Entry);
        }
    }

    return rc;
}
769
/**
 * Write the given table to the image, converting to little endian first on
 * big endian hosts.
 *
 * @returns VBox status code (including async-in-progress codes from the
 *          metadata write path).
 * @param   pImage         The image instance data.
 * @param   pIoCtx         The I/O context.
 * @param   offTbl         The offset the table should be written to.
 * @param   paTbl          The table to write (host endian).
 * @param   pfnComplete    Callback called when the write completes.
 * @param   pvUser         Opaque user data to pass in the completion callback.
 */
static int qedTblWrite(PQEDIMAGE pImage, PVDIOCTX pIoCtx, uint64_t offTbl, uint64_t *paTbl,
                       PFNVDXFERCOMPLETED pfnComplete, void *pvUser)
{
    int rc = VINF_SUCCESS;

#if defined(RT_BIG_ENDIAN)
    /* Need a temporary buffer: the in-memory table must stay host endian. */
    uint64_t *paTblImg = (uint64_t *)RTMemAllocZ(pImage->cbTable);
    if (paTblImg)
    {
        qedTableConvertFromHostEndianess(paTblImg, paTbl,
                                         pImage->cTableEntries);
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pImage->pStorage,
                                    offTbl, paTblImg, pImage->cbTable,
                                    pIoCtx, pfnComplete, pvUser);
        /* NOTE(review): freed immediately after the call - assumes the I/O
         * layer has copied/consumed the buffer even for async writes. */
        RTMemFree(paTblImg);
    }
    else
        rc = VERR_NO_MEMORY;
#else
    /* Little endian host: on-disk format matches, write table directly. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pImage->pStorage,
                                offTbl, paTbl, pImage->cbTable, pIoCtx,
                                pfnComplete, pvUser);
#endif

    return rc;
}
808
/**
 * Internal. Flush image data to disk.
 *
 * Writes the L1 table (converted to little endian on big endian hosts),
 * then the header, then flushes the underlying storage.  Does nothing for
 * read-only images or when no storage handle is open.
 *
 * @returns VBox status code.
 * @param   pImage    The image instance data.
 */
static int qedFlushImage(PQEDIMAGE pImage)
{
    int rc = VINF_SUCCESS;

    if (   pImage->pStorage
        && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        QedHeader Header;

        Assert(!(pImage->cbTable % pImage->cbCluster));
#if defined(RT_BIG_ENDIAN)
        /* Temporary little endian copy; the in-memory L1 stays host endian. */
        uint64_t *paL1TblImg = (uint64_t *)RTMemAllocZ(pImage->cbTable);
        if (paL1TblImg)
        {
            qedTableConvertFromHostEndianess(paL1TblImg, pImage->paL1Table,
                                             pImage->cTableEntries);
            rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage,
                                        pImage->offL1Table, paL1TblImg,
                                        pImage->cbTable);
            RTMemFree(paL1TblImg);
        }
        else
            rc = VERR_NO_MEMORY;
#else
        /* Write L1 table directly. */
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage, pImage->offL1Table,
                                    pImage->paL1Table, pImage->cbTable);
#endif
        if (RT_SUCCESS(rc))
        {
            /* Write header after the table so a crash leaves consistent metadata. */
            qedHdrConvertFromHostEndianess(pImage, &Header);
            rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage, 0, &Header,
                                        sizeof(Header));
            if (RT_SUCCESS(rc))
                rc = vdIfIoIntFileFlushSync(pImage->pIfIo, pImage->pStorage);
        }
    }

    return rc;
}
853
854/**
855 * Checks whether the given cluster offset is valid.
856 *
857 * @returns Whether the given cluster offset is valid.
858 * @param offCluster The table offset to check.
859 * @param cbFile The real file size of the image.
860 * @param cbCluster The cluster size in bytes.
861 */
862DECLINLINE(bool) qedIsClusterOffsetValid(uint64_t offCluster, uint64_t cbFile, size_t cbCluster)
863{
864 return (offCluster <= cbFile - cbCluster)
865 && !(offCluster & (cbCluster - 1));
866}
867
868/**
869 * Checks whether the given table offset is valid.
870 *
871 * @returns Whether the given table offset is valid.
872 * @param offTbl The table offset to check.
873 * @param cbFile The real file size of the image.
874 * @param cbTable The table size in bytes.
875 * @param cbCluster The cluster size in bytes.
876 */
877DECLINLINE(bool) qedIsTblOffsetValid(uint64_t offTbl, uint64_t cbFile, size_t cbTable, size_t cbCluster)
878{
879 return (offTbl <= cbFile - cbTable)
880 && !(offTbl & (cbCluster - 1));
881}
882
883/**
884 * Sets the specified range in the cluster bitmap checking whether any of the clusters is already
885 * used before.
886 *
887 * @returns Whether the range was clear and is set now.
888 * @param pvClusterBitmap The cluster bitmap to use.
889 * @param offClusterStart The first cluster to check and set.
890 * @param offClusterEnd The first cluster to not check and set anymore.
891 */
892static bool qedClusterBitmapCheckAndSet(void *pvClusterBitmap, uint32_t offClusterStart, uint32_t offClusterEnd)
893{
894 for (uint32_t offCluster = offClusterStart; offCluster < offClusterEnd; offCluster++)
895 if (ASMBitTest(pvClusterBitmap, offCluster))
896 return false;
897
898 ASMBitSetRange(pvClusterBitmap, offClusterStart, offClusterEnd);
899 return true;
900}
901
902/**
903 * Checks the given image for consistency, usually called when the
904 * QED_FEATURE_NEED_CHECK bit is set.
905 *
906 * @returns VBox status code.
907 * @retval VINF_SUCCESS when the image can be accessed.
908 * @param pImage The image instance data.
909 * @param pHeader The header to use for checking.
910 *
911 * @note It is not required that the image state is fully initialized Only
912 * The I/O interface and storage handle need to be valid.
913 * @note The header must be converted to the host CPU endian format already
914 * and should be validated already.
915 */
916static int qedCheckImage(PQEDIMAGE pImage, PQedHeader pHeader)
917{
918 uint64_t cbFile;
919 uint32_t cbTable;
920 uint32_t cTableEntries;
921 uint64_t *paL1Tbl = NULL;
922 uint64_t *paL2Tbl = NULL;
923 void *pvClusterBitmap = NULL;
924 uint32_t offClusterStart;
925 int rc = VINF_SUCCESS;
926
927 pImage->cbCluster = pHeader->u32ClusterSize;
928 cbTable = pHeader->u32TableSize * pHeader->u32ClusterSize;
929 cTableEntries = cbTable / sizeof(uint64_t);
930
931 do
932 {
933 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pStorage, &cbFile);
934 if (RT_FAILURE(rc))
935 {
936 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
937 N_("Qed: Querying the file size of image '%s' failed"),
938 pImage->pszFilename);
939 break;
940 }
941
942 /* Allocate L1 table. */
943 paL1Tbl = (uint64_t *)RTMemAllocZ(cbTable);
944 if (!paL1Tbl)
945 {
946 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
947 N_("Qed: Allocating memory for the L1 table for image '%s' failed"),
948 pImage->pszFilename);
949 break;
950 }
951
952 paL2Tbl = (uint64_t *)RTMemAllocZ(cbTable);
953 if (!paL2Tbl)
954 {
955 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
956 N_("Qed: Allocating memory for the L2 table for image '%s' failed"),
957 pImage->pszFilename);
958 break;
959 }
960
961 pvClusterBitmap = RTMemAllocZ(cbFile / pHeader->u32ClusterSize / 8);
962 if (!pvClusterBitmap)
963 {
964 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
965 N_("Qed: Allocating memory for the cluster bitmap for image '%s' failed"),
966 pImage->pszFilename);
967 break;
968 }
969
970 /* Validate L1 table offset. */
971 if (!qedIsTblOffsetValid(pHeader->u64OffL1Table, cbFile, cbTable, pHeader->u32ClusterSize))
972 {
973 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
974 N_("Qed: L1 table offset of image '%s' is corrupt (%llu)"),
975 pImage->pszFilename, pHeader->u64OffL1Table);
976 break;
977 }
978
979 /* Read L1 table. */
980 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage,
981 pHeader->u64OffL1Table, paL1Tbl, cbTable);
982 if (RT_FAILURE(rc))
983 {
984 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
985 N_("Qed: Reading the L1 table from image '%s' failed"),
986 pImage->pszFilename);
987 break;
988 }
989
990 /* Mark the L1 table in cluster bitmap. */
991 ASMBitSet(pvClusterBitmap, 0); /* Header is always in cluster 0. */
992 offClusterStart = qedByte2Cluster(pImage, pHeader->u64OffL1Table);
993 bool fSet = qedClusterBitmapCheckAndSet(pvClusterBitmap, offClusterStart, offClusterStart + pHeader->u32TableSize);
994 Assert(fSet);
995
996 /* Scan the L1 and L2 tables for invalid entries. */
997 qedTableConvertToHostEndianess(paL1Tbl, cTableEntries);
998
999 for (unsigned iL1 = 0; iL1 < cTableEntries; iL1++)
1000 {
1001 if (!paL1Tbl[iL1])
1002 continue; /* Skip unallocated clusters. */
1003
1004 if (!qedIsTblOffsetValid(paL1Tbl[iL1], cbFile, cbTable, pHeader->u32ClusterSize))
1005 {
1006 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
1007 N_("Qed: Entry %d of the L1 table from image '%s' is invalid (%llu)"),
1008 iL1, pImage->pszFilename, paL1Tbl[iL1]);
1009 break;
1010 }
1011
1012 /* Now check that the clusters are not allocated already. */
1013 offClusterStart = qedByte2Cluster(pImage, paL1Tbl[iL1]);
1014 fSet = qedClusterBitmapCheckAndSet(pvClusterBitmap, offClusterStart, offClusterStart + pHeader->u32TableSize);
1015 if (!fSet)
1016 {
1017 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
1018 N_("Qed: Entry %d of the L1 table from image '%s' points to a already used cluster (%llu)"),
1019 iL1, pImage->pszFilename, paL1Tbl[iL1]);
1020 break;
1021 }
1022
1023 /* Read the linked L2 table and check it. */
1024 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage,
1025 paL1Tbl[iL1], paL2Tbl, cbTable);
1026 if (RT_FAILURE(rc))
1027 {
1028 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1029 N_("Qed: Reading the L2 table from image '%s' failed"),
1030 pImage->pszFilename);
1031 break;
1032 }
1033
1034 /* Check all L2 entries. */
1035 for (unsigned iL2 = 0; iL2 < cTableEntries; iL2++)
1036 {
1037 if (paL2Tbl[iL2])
1038 continue; /* Skip unallocated clusters. */
1039
1040 if (!qedIsClusterOffsetValid(paL2Tbl[iL2], cbFile, pHeader->u32ClusterSize))
1041 {
1042 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
1043 N_("Qed: Entry %d of the L2 table from image '%s' is invalid (%llu)"),
1044 iL2, pImage->pszFilename, paL2Tbl[iL2]);
1045 break;
1046 }
1047
1048 /* Now check that the clusters are not allocated already. */
1049 offClusterStart = qedByte2Cluster(pImage, paL2Tbl[iL2]);
1050 fSet = qedClusterBitmapCheckAndSet(pvClusterBitmap, offClusterStart, offClusterStart + 1);
1051 if (!fSet)
1052 {
1053 rc = vdIfError(pImage->pIfError, VERR_VD_GEN_INVALID_HEADER, RT_SRC_POS,
1054 N_("Qed: Entry %d of the L2 table from image '%s' points to a already used cluster (%llu)"),
1055 iL2, pImage->pszFilename, paL2Tbl[iL2]);
1056 break;
1057 }
1058 }
1059 }
1060 } while(0);
1061
1062 if (paL1Tbl)
1063 RTMemFree(paL1Tbl);
1064 if (paL2Tbl)
1065 RTMemFree(paL2Tbl);
1066 if (pvClusterBitmap)
1067 RTMemFree(pvClusterBitmap);
1068
1069 return rc;
1070}
1071
1072/**
1073 * Internal. Free all allocated space for representing an image except pImage,
1074 * and optionally delete the image from disk.
1075 */
1076static int qedFreeImage(PQEDIMAGE pImage, bool fDelete)
1077{
1078 int rc = VINF_SUCCESS;
1079
1080 /* Freeing a never allocated image (e.g. because the open failed) is
1081 * not signalled as an error. After all nothing bad happens. */
1082 if (pImage)
1083 {
1084 if (pImage->pStorage)
1085 {
1086 /* No point updating the file that is deleted anyway. */
1087 if (!fDelete)
1088 qedFlushImage(pImage);
1089
1090 rc = vdIfIoIntFileClose(pImage->pIfIo, pImage->pStorage);
1091 pImage->pStorage = NULL;
1092 }
1093
1094 if (pImage->paL1Table)
1095 RTMemFree(pImage->paL1Table);
1096
1097 if (pImage->pszBackingFilename)
1098 {
1099 RTMemFree(pImage->pszBackingFilename);
1100 pImage->pszBackingFilename = NULL;
1101 }
1102
1103 qedL2TblCacheDestroy(pImage);
1104
1105 if (fDelete && pImage->pszFilename)
1106 vdIfIoIntFileDelete(pImage->pIfIo, pImage->pszFilename);
1107 }
1108
1109 LogFlowFunc(("returns %Rrc\n", rc));
1110 return rc;
1111}
1112
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance data. pszFilename, pVDIfsDisk and
 *                      pVDIfsImage must have been set up by the caller.
 * @param   uOpenFlags  VD_OPEN_FLAGS_* controlling read-only mode etc.
 *
 * On failure all partially constructed state is torn down again via
 * qedFreeImage() before returning.
 */
static int qedOpenImage(PQEDIMAGE pImage, unsigned uOpenFlags)
{
    pImage->uOpenFlags = uOpenFlags;

    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    /*
     * Create the L2 cache before opening the image so we can call qedFreeImage()
     * even if opening the image file fails.
     */
    int rc = qedL2TblCacheCreate(pImage);
    if (RT_SUCCESS(rc))
    {
        /* Open the image. */
        rc = vdIfIoIntFileOpen(pImage->pIfIo, pImage->pszFilename,
                               VDOpenFlagsToFileOpenFlags(uOpenFlags,
                                                          false /* fCreate */),
                               &pImage->pStorage);
        if (RT_SUCCESS(rc))
        {
            uint64_t cbFile;
            rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pStorage, &cbFile);
            /* The file must at least hold a complete header to qualify. */
            if (   RT_SUCCESS(rc)
                && cbFile > sizeof(QedHeader))
            {
                QedHeader Header;

                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage, 0, &Header, sizeof(Header));
                if (   RT_SUCCESS(rc)
                    && qedHdrConvertToHostEndianess(&Header))
                {
                    /* Reject images using feature bits this backend does not implement. */
                    if (   !(Header.u64FeatureFlags & ~QED_FEATURE_MASK)
                        && !(Header.u64FeatureFlags & QED_FEATURE_BACKING_FILE_NO_PROBE))
                    {
                        if (Header.u64FeatureFlags & QED_FEATURE_NEED_CHECK)
                        {
                            /* Image needs checking. */
                            if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
                                rc = qedCheckImage(pImage, &Header);
                            else
                                rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
                                               N_("Qed: Image '%s' needs checking but is opened readonly"),
                                               pImage->pszFilename);
                        }

                        if (   RT_SUCCESS(rc)
                            && (Header.u64FeatureFlags & QED_FEATURE_BACKING_FILE))
                        {
                            /* Load backing filename from image. RTMemAllocZ guarantees
                             * the extra byte stays zero, i.e. the string is terminated. */
                            pImage->pszBackingFilename = (char *)RTMemAllocZ(Header.u32BackingFilenameSize + 1); /* +1 for \0 terminator. */
                            if (pImage->pszBackingFilename)
                            {
                                pImage->cbBackingFilename = Header.u32BackingFilenameSize;
                                pImage->offBackingFilename = Header.u32OffBackingFilename;
                                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage,
                                                           Header.u32OffBackingFilename, pImage->pszBackingFilename,
                                                           Header.u32BackingFilenameSize);
                            }
                            else
                                rc = VERR_NO_MEMORY;
                        }

                        if (RT_SUCCESS(rc))
                        {
                            /* Cache the geometry data derived from the header. */
                            pImage->cbImage = cbFile;
                            pImage->cbCluster = Header.u32ClusterSize;
                            pImage->cbTable = Header.u32TableSize * pImage->cbCluster;
                            pImage->cTableEntries = pImage->cbTable / sizeof(uint64_t);
                            pImage->offL1Table = Header.u64OffL1Table;
                            pImage->cbSize = Header.u64Size;
                            qedTableMasksInit(pImage);

                            /* Allocate L1 table. */
                            pImage->paL1Table = (uint64_t *)RTMemAllocZ(pImage->cbTable);
                            if (pImage->paL1Table)
                            {
                                /* Read from the image. */
                                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage,
                                                           pImage->offL1Table, pImage->paL1Table,
                                                           pImage->cbTable);
                                if (RT_SUCCESS(rc))
                                {
                                    qedTableConvertToHostEndianess(pImage->paL1Table, pImage->cTableEntries);

                                    /* If the consistency check succeeded, clear the flag by flushing the image. */
                                    if (Header.u64FeatureFlags & QED_FEATURE_NEED_CHECK)
                                        rc = qedFlushImage(pImage);
                                }
                                else
                                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                                   N_("Qed: Reading the L1 table for image '%s' failed"),
                                                   pImage->pszFilename);
                            }
                            else
                                rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
                                               N_("Qed: Out of memory allocating L1 table for image '%s'"),
                                               pImage->pszFilename);
                        }
                    }
                    else
                        rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
                                       N_("Qed: The image '%s' makes use of unsupported features"),
                                       pImage->pszFilename);
                }
                else if (RT_SUCCESS(rc))
                    rc = VERR_VD_GEN_INVALID_HEADER;
            }
            else if (RT_SUCCESS(rc))
                rc = VERR_VD_GEN_INVALID_HEADER;
        }
        /* else: Do NOT signal an appropriate error here, as the VD layer has the
         * choice of retrying the open if it failed. */
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                       N_("Qed: Creating the L2 table cache for image '%s' failed"),
                       pImage->pszFilename);

    if (RT_FAILURE(rc))
        qedFreeImage(pImage, false);
    return rc;
}
1240
/**
 * Internal: Create a qed image.
 *
 * @returns VBox status code.
 * @param   pImage         The image instance data. pszFilename and the
 *                         interface lists must be set up by the caller.
 * @param   cbSize         Size of the virtual disk in bytes.
 * @param   uImageFlags    VD_IMAGE_FLAGS_*; fixed images are rejected because
 *                         QED is an inherently sparse format.
 * @param   pszComment     Image comment (unused, QED has no comment field).
 * @param   pPCHSGeometry  Physical geometry to remember in the instance data.
 * @param   pLCHSGeometry  Logical geometry to remember in the instance data.
 * @param   uOpenFlags     VD_OPEN_FLAGS_* (read-only is stripped for creation).
 * @param   pIfProgress    Optional progress notification interface.
 * @param   uPercentStart  Start of the progress range to report.
 * @param   uPercentSpan   Span of the progress range to report.
 */
static int qedCreateImage(PQEDIMAGE pImage, uint64_t cbSize,
                          unsigned uImageFlags, const char *pszComment,
                          PCVDGEOMETRY pPCHSGeometry,
                          PCVDGEOMETRY pLCHSGeometry, unsigned uOpenFlags,
                          PVDINTERFACEPROGRESS pIfProgress,
                          unsigned uPercentStart, unsigned uPercentSpan)
{
    RT_NOREF1(pszComment);
    int rc;

    if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
    {
        rc = qedL2TblCacheCreate(pImage);
        if (RT_SUCCESS(rc))
        {
            /* Creation always happens in read/write mode. */
            pImage->uOpenFlags = uOpenFlags & ~VD_OPEN_FLAGS_READONLY;
            pImage->uImageFlags = uImageFlags;
            pImage->PCHSGeometry = *pPCHSGeometry;
            pImage->LCHSGeometry = *pLCHSGeometry;

            pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
            pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
            AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

            /* Create image file. */
            uint32_t fOpen = VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, true /* fCreate */);
            rc = vdIfIoIntFileOpen(pImage->pIfIo, pImage->pszFilename, fOpen, &pImage->pStorage);
            if (RT_SUCCESS(rc))
            {
                /* Init image state. */
                pImage->cbSize = cbSize;
                pImage->cbCluster = QED_CLUSTER_SIZE_DEFAULT;
                pImage->cbTable = qedCluster2Byte(pImage, QED_TABLE_SIZE_DEFAULT);
                pImage->cTableEntries = pImage->cbTable / sizeof(uint64_t);
                pImage->offL1Table = qedCluster2Byte(pImage, 1); /* Cluster 0 is the header. */
                pImage->cbImage = (1 * pImage->cbCluster) + pImage->cbTable; /* Header + L1 table size. */
                pImage->cbBackingFilename = 0;
                pImage->offBackingFilename = 0;
                qedTableMasksInit(pImage);

                /* Init L1 table. */
                pImage->paL1Table = (uint64_t *)RTMemAllocZ(pImage->cbTable);
                if (RT_LIKELY(pImage->paL1Table))
                {
                    /* Flushing writes the header and the (empty) L1 table,
                     * which completes the on-disk creation. */
                    vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
                    rc = qedFlushImage(pImage);
                }
                else
                    rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("Qed: cannot allocate memory for L1 table of image '%s'"),
                                   pImage->pszFilename);
            }
            else
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("Qed: cannot create image '%s'"), pImage->pszFilename);
        }
        else
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("Qed: Failed to create L2 cache for image '%s'"),
                           pImage->pszFilename);
    }
    else
        rc = vdIfError(pImage->pIfError, VERR_VD_INVALID_TYPE, RT_SRC_POS, N_("Qed: cannot create fixed image '%s'"), pImage->pszFilename);

    if (RT_SUCCESS(rc))
        vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
    else
        /* Keep a pre-existing file intact; only delete what we created. */
        qedFreeImage(pImage, rc != VERR_ALREADY_EXISTS);

    return rc;
}
1312
1313/**
1314 * Rollback anything done during async cluster allocation.
1315 *
1316 * @returns VBox status code.
1317 * @param pImage The image instance data.
1318 * @param pIoCtx The I/O context.
1319 * @param pClusterAlloc The cluster allocation to rollback.
1320 */
1321static int qedAsyncClusterAllocRollback(PQEDIMAGE pImage, PVDIOCTX pIoCtx, PQEDCLUSTERASYNCALLOC pClusterAlloc)
1322{
1323 RT_NOREF1(pIoCtx);
1324 int rc = VINF_SUCCESS;
1325
1326 switch (pClusterAlloc->enmAllocState)
1327 {
1328 case QEDCLUSTERASYNCALLOCSTATE_L2_ALLOC:
1329 case QEDCLUSTERASYNCALLOCSTATE_L2_LINK:
1330 {
1331 /* Revert the L1 table entry */
1332 pImage->paL1Table[pClusterAlloc->idxL1] = 0;
1333
1334 /* Assumption right now is that the L1 table is not modified on storage if the link fails. */
1335 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pImage->pStorage, pClusterAlloc->cbImageOld);
1336 qedL2TblCacheEntryRelease(pClusterAlloc->pL2Entry); /* Release L2 cache entry. */
1337 qedL2TblCacheEntryFree(pImage, pClusterAlloc->pL2Entry); /* Free it, it is not in the cache yet. */
1338 break;
1339 }
1340 case QEDCLUSTERASYNCALLOCSTATE_USER_ALLOC:
1341 case QEDCLUSTERASYNCALLOCSTATE_USER_LINK:
1342 {
1343 /* Assumption right now is that the L2 table is not modified if the link fails. */
1344 pClusterAlloc->pL2Entry->paL2Tbl[pClusterAlloc->idxL2] = 0;
1345 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pImage->pStorage, pClusterAlloc->cbImageOld);
1346 qedL2TblCacheEntryRelease(pClusterAlloc->pL2Entry); /* Release L2 cache entry. */
1347 break;
1348 }
1349 default:
1350 AssertMsgFailed(("Invalid cluster allocation state %d\n", pClusterAlloc->enmAllocState));
1351 rc = VERR_INVALID_STATE;
1352 }
1353
1354 RTMemFree(pClusterAlloc);
1355 return rc;
1356}
1357
1358/**
1359 * Updates the state of the async cluster allocation.
1360 *
1361 * @returns VBox status code.
1362 * @param pBackendData The opaque backend data.
1363 * @param pIoCtx I/O context associated with this request.
1364 * @param pvUser Opaque user data passed during a read/write request.
1365 * @param rcReq Status code for the completed request.
1366 */
1367static DECLCALLBACK(int) qedAsyncClusterAllocUpdate(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
1368{
1369 int rc = VINF_SUCCESS;
1370 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1371 PQEDCLUSTERASYNCALLOC pClusterAlloc = (PQEDCLUSTERASYNCALLOC)pvUser;
1372
1373 if (RT_FAILURE(rcReq))
1374 return qedAsyncClusterAllocRollback(pImage, pIoCtx, pClusterAlloc);
1375
1376 AssertPtr(pClusterAlloc->pL2Entry);
1377
1378 switch (pClusterAlloc->enmAllocState)
1379 {
1380 case QEDCLUSTERASYNCALLOCSTATE_L2_ALLOC:
1381 {
1382 /* Update the link in the in memory L1 table now. */
1383 pImage->paL1Table[pClusterAlloc->idxL1] = pClusterAlloc->pL2Entry->offL2Tbl;
1384
1385 /* Update the link in the on disk L1 table now. */
1386 pClusterAlloc->enmAllocState = QEDCLUSTERASYNCALLOCSTATE_L2_LINK;
1387 rc = qedTblWrite(pImage, pIoCtx, pImage->offL1Table, pImage->paL1Table,
1388 qedAsyncClusterAllocUpdate, pClusterAlloc);
1389 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1390 break;
1391 else if (RT_FAILURE(rc))
1392 {
1393 /* Rollback. */
1394 qedAsyncClusterAllocRollback(pImage, pIoCtx, pClusterAlloc);
1395 break;
1396 }
1397 /* Success, fall through. */
1398 }
1399 case QEDCLUSTERASYNCALLOCSTATE_L2_LINK:
1400 {
1401 /* L2 link updated in L1 , save L2 entry in cache and allocate new user data cluster. */
1402 uint64_t offData = qedClusterAllocate(pImage, 1);
1403
1404 qedL2TblCacheEntryInsert(pImage, pClusterAlloc->pL2Entry);
1405
1406 pClusterAlloc->enmAllocState = QEDCLUSTERASYNCALLOCSTATE_USER_ALLOC;
1407 pClusterAlloc->cbImageOld = offData;
1408 pClusterAlloc->offClusterNew = offData;
1409
1410 /* Write data. */
1411 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pImage->pStorage,
1412 offData, pIoCtx, pClusterAlloc->cbToWrite,
1413 qedAsyncClusterAllocUpdate, pClusterAlloc);
1414 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1415 break;
1416 else if (RT_FAILURE(rc))
1417 {
1418 qedAsyncClusterAllocRollback(pImage, pIoCtx, pClusterAlloc);
1419 RTMemFree(pClusterAlloc);
1420 break;
1421 }
1422 }
1423 case QEDCLUSTERASYNCALLOCSTATE_USER_ALLOC:
1424 {
1425 pClusterAlloc->enmAllocState = QEDCLUSTERASYNCALLOCSTATE_USER_LINK;
1426 pClusterAlloc->pL2Entry->paL2Tbl[pClusterAlloc->idxL2] = pClusterAlloc->offClusterNew;
1427
1428 /* Link L2 table and update it. */
1429 rc = qedTblWrite(pImage, pIoCtx, pImage->paL1Table[pClusterAlloc->idxL1],
1430 pClusterAlloc->pL2Entry->paL2Tbl,
1431 qedAsyncClusterAllocUpdate, pClusterAlloc);
1432 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1433 break;
1434 else if (RT_FAILURE(rc))
1435 {
1436 qedAsyncClusterAllocRollback(pImage, pIoCtx, pClusterAlloc);
1437 RTMemFree(pClusterAlloc);
1438 break;
1439 }
1440 }
1441 case QEDCLUSTERASYNCALLOCSTATE_USER_LINK:
1442 {
1443 /* Everything done without errors, signal completion. */
1444 qedL2TblCacheEntryRelease(pClusterAlloc->pL2Entry);
1445 RTMemFree(pClusterAlloc);
1446 rc = VINF_SUCCESS;
1447 break;
1448 }
1449 default:
1450 AssertMsgFailed(("Invalid async cluster allocation state %d\n",
1451 pClusterAlloc->enmAllocState));
1452 }
1453
1454 return rc;
1455}
1456
1457/** @copydoc VDIMAGEBACKEND::pfnProbe */
1458static DECLCALLBACK(int) qedProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
1459 PVDINTERFACE pVDIfsImage, VDTYPE *penmType)
1460{
1461 RT_NOREF1(pVDIfsDisk);
1462 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p\n", pszFilename, pVDIfsDisk, pVDIfsImage));
1463 PVDIOSTORAGE pStorage = NULL;
1464 int rc = VINF_SUCCESS;
1465
1466 /* Get I/O interface. */
1467 PVDINTERFACEIOINT pIfIo = VDIfIoIntGet(pVDIfsImage);
1468 AssertPtrReturn(pIfIo, VERR_INVALID_PARAMETER);
1469 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
1470
1471 /*
1472 * Open the file and read the footer.
1473 */
1474 rc = vdIfIoIntFileOpen(pIfIo, pszFilename,
1475 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_READONLY,
1476 false /* fCreate */),
1477 &pStorage);
1478 if (RT_SUCCESS(rc))
1479 {
1480 uint64_t cbFile;
1481
1482 rc = vdIfIoIntFileGetSize(pIfIo, pStorage, &cbFile);
1483 if ( RT_SUCCESS(rc)
1484 && cbFile > sizeof(QedHeader))
1485 {
1486 QedHeader Header;
1487
1488 rc = vdIfIoIntFileReadSync(pIfIo, pStorage, 0, &Header, sizeof(Header));
1489 if ( RT_SUCCESS(rc)
1490 && qedHdrConvertToHostEndianess(&Header))
1491 *penmType = VDTYPE_HDD;
1492 else
1493 rc = VERR_VD_GEN_INVALID_HEADER;
1494 }
1495 else
1496 rc = VERR_VD_GEN_INVALID_HEADER;
1497 }
1498
1499 if (pStorage)
1500 vdIfIoIntFileClose(pIfIo, pStorage);
1501
1502 LogFlowFunc(("returns %Rrc\n", rc));
1503 return rc;
1504}
1505
1506/** @copydoc VDIMAGEBACKEND::pfnOpen */
1507static DECLCALLBACK(int) qedOpen(const char *pszFilename, unsigned uOpenFlags,
1508 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1509 VDTYPE enmType, void **ppBackendData)
1510{
1511 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
1512
1513 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
1514 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
1515 int rc;
1516
1517 /* Check open flags. All valid flags are supported. */
1518 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
1519 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
1520
1521 PQEDIMAGE pImage = (PQEDIMAGE)RTMemAllocZ(sizeof(QEDIMAGE));
1522 if (RT_LIKELY(pImage))
1523 {
1524 pImage->pszFilename = pszFilename;
1525 pImage->pStorage = NULL;
1526 pImage->pVDIfsDisk = pVDIfsDisk;
1527 pImage->pVDIfsImage = pVDIfsImage;
1528
1529 rc = qedOpenImage(pImage, uOpenFlags);
1530 if (RT_SUCCESS(rc))
1531 *ppBackendData = pImage;
1532 else
1533 RTMemFree(pImage);
1534 }
1535 else
1536 rc = VERR_NO_MEMORY;
1537
1538 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1539 return rc;
1540}
1541
1542/** @copydoc VDIMAGEBACKEND::pfnCreate */
1543static DECLCALLBACK(int) qedCreate(const char *pszFilename, uint64_t cbSize,
1544 unsigned uImageFlags, const char *pszComment,
1545 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
1546 PCRTUUID pUuid, unsigned uOpenFlags,
1547 unsigned uPercentStart, unsigned uPercentSpan,
1548 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1549 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
1550 void **ppBackendData)
1551{
1552 RT_NOREF1(pUuid);
1553 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%d ppBackendData=%#p",
1554 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
1555 int rc;
1556
1557 /* Check the VD container type. */
1558 if (enmType != VDTYPE_HDD)
1559 return VERR_VD_INVALID_TYPE;
1560
1561 /* Check open flags. All valid flags are supported. */
1562 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
1563 AssertReturn( VALID_PTR(pszFilename)
1564 && *pszFilename
1565 && VALID_PTR(pPCHSGeometry)
1566 && VALID_PTR(pLCHSGeometry), VERR_INVALID_PARAMETER);
1567
1568 PQEDIMAGE pImage = (PQEDIMAGE)RTMemAllocZ(sizeof(QEDIMAGE));
1569 if (RT_LIKELY(pImage))
1570 {
1571 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
1572
1573 pImage->pszFilename = pszFilename;
1574 pImage->pStorage = NULL;
1575 pImage->pVDIfsDisk = pVDIfsDisk;
1576 pImage->pVDIfsImage = pVDIfsImage;
1577
1578 rc = qedCreateImage(pImage, cbSize, uImageFlags, pszComment,
1579 pPCHSGeometry, pLCHSGeometry, uOpenFlags,
1580 pIfProgress, uPercentStart, uPercentSpan);
1581 if (RT_SUCCESS(rc))
1582 {
1583 /* So far the image is opened in read/write mode. Make sure the
1584 * image is opened in read-only mode if the caller requested that. */
1585 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
1586 {
1587 qedFreeImage(pImage, false);
1588 rc = qedOpenImage(pImage, uOpenFlags);
1589 }
1590
1591 if (RT_SUCCESS(rc))
1592 *ppBackendData = pImage;
1593 }
1594
1595 if (RT_FAILURE(rc))
1596 RTMemFree(pImage);
1597 }
1598 else
1599 rc = VERR_NO_MEMORY;
1600
1601 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1602 return rc;
1603}
1604
1605/** @copydoc VDIMAGEBACKEND::pfnRename */
1606static DECLCALLBACK(int) qedRename(void *pBackendData, const char *pszFilename)
1607{
1608 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
1609 int rc = VINF_SUCCESS;
1610 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1611
1612 /* Check arguments. */
1613 AssertReturn((pImage && pszFilename && *pszFilename), VERR_INVALID_PARAMETER);
1614
1615 /* Close the image. */
1616 rc = qedFreeImage(pImage, false);
1617 if (RT_SUCCESS(rc))
1618 {
1619 /* Rename the file. */
1620 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pszFilename, 0);
1621 if (RT_SUCCESS(rc))
1622 {
1623 /* Update pImage with the new information. */
1624 pImage->pszFilename = pszFilename;
1625
1626 /* Open the old image with new name. */
1627 rc = qedOpenImage(pImage, pImage->uOpenFlags);
1628 }
1629 else
1630 {
1631 /* The move failed, try to reopen the original image. */
1632 int rc2 = qedOpenImage(pImage, pImage->uOpenFlags);
1633 if (RT_FAILURE(rc2))
1634 rc = rc2;
1635 }
1636 }
1637
1638 LogFlowFunc(("returns %Rrc\n", rc));
1639 return rc;
1640}
1641
1642/** @copydoc VDIMAGEBACKEND::pfnClose */
1643static DECLCALLBACK(int) qedClose(void *pBackendData, bool fDelete)
1644{
1645 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
1646 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1647
1648 int rc = qedFreeImage(pImage, fDelete);
1649 RTMemFree(pImage);
1650
1651 LogFlowFunc(("returns %Rrc\n", rc));
1652 return rc;
1653}
1654
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) qedRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
    uint32_t offCluster = 0;
    uint32_t idxL1 = 0;
    uint32_t idxL2 = 0;
    uint64_t offFile = 0;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Split the logical offset into L1 index, L2 index and in-cluster offset. */
    qedConvertLogicalOffset(pImage, uOffset, &idxL1, &idxL2, &offCluster);

    /* Clip read size to remain in the cluster. */
    cbToRead = RT_MIN(cbToRead, pImage->cbCluster - offCluster);

    /* Get offset in image. */
    int rc = qedConvertToImageOffset(pImage, pIoCtx, idxL1, idxL2, offCluster, &offFile);
    if (RT_SUCCESS(rc))
        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pImage->pStorage, offFile,
                                   pIoCtx, cbToRead);

    /* Report the clipped size also for unallocated clusters (VERR_VD_BLOCK_FREE)
     * and for requests still in flight, so the caller knows what this call covered. */
    if (   (   RT_SUCCESS(rc)
            || rc == VERR_VD_BLOCK_FREE
            || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        && pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
1693
/** @copydoc VDIMAGEBACKEND::pfnWrite */
static DECLCALLBACK(int) qedWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                  PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                  size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
    uint32_t offCluster = 0;
    uint32_t idxL1 = 0;
    uint32_t idxL2 = 0;
    uint64_t offImage = 0;
    int rc = VINF_SUCCESS;

    AssertPtr(pImage);
    Assert(!(uOffset % 512));
    Assert(!(cbToWrite % 512));
    AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToWrite <= pImage->cbSize, VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        /* Convert offset to L1, L2 index and cluster offset. */
        qedConvertLogicalOffset(pImage, uOffset, &idxL1, &idxL2, &offCluster);

        /* Clip write size to remain in the cluster. */
        cbToWrite = RT_MIN(cbToWrite, pImage->cbCluster - offCluster);
        Assert(!(cbToWrite % 512));

        /* Get offset in image. */
        rc = qedConvertToImageOffset(pImage, pIoCtx, idxL1, idxL2, offCluster, &offImage);
        if (RT_SUCCESS(rc))
            /* Cluster already allocated: plain in-place write. */
            rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pImage->pStorage,
                                        offImage, pIoCtx, cbToWrite, NULL, NULL);
        else if (rc == VERR_VD_BLOCK_FREE)
        {
            /* The target cluster is not allocated yet. */
            if (   cbToWrite == pImage->cbCluster
                && !(fWrite & VD_WRITE_NO_ALLOC))
            {
                PQEDL2CACHEENTRY pL2Entry = NULL;

                /* Full cluster write to previously unallocated cluster.
                 * Allocate cluster and write data. */
                Assert(!offCluster);

                do
                {
                    /* Check if we have to allocate a new cluster for L2 tables. */
                    if (!pImage->paL1Table[idxL1])
                    {
                        uint64_t offL2Tbl;
                        PQEDCLUSTERASYNCALLOC pL2ClusterAlloc = NULL;

                        /* Allocate new async cluster allocation state. */
                        pL2ClusterAlloc = (PQEDCLUSTERASYNCALLOC)RTMemAllocZ(sizeof(QEDCLUSTERASYNCALLOC));
                        if (RT_UNLIKELY(!pL2ClusterAlloc))
                        {
                            rc = VERR_NO_MEMORY;
                            break;
                        }

                        pL2Entry = qedL2TblCacheEntryAlloc(pImage);
                        if (!pL2Entry)
                        {
                            rc = VERR_NO_MEMORY;
                            RTMemFree(pL2ClusterAlloc);
                            break;
                        }

                        /* Reserve space for the new (zeroed) L2 table. */
                        offL2Tbl = qedClusterAllocate(pImage, qedByte2Cluster(pImage, pImage->cbTable));
                        pL2Entry->offL2Tbl = offL2Tbl;
                        memset(pL2Entry->paL2Tbl, 0, pImage->cbTable);

                        pL2ClusterAlloc->enmAllocState = QEDCLUSTERASYNCALLOCSTATE_L2_ALLOC;
                        pL2ClusterAlloc->cbImageOld = offL2Tbl;
                        pL2ClusterAlloc->offClusterNew = offL2Tbl;
                        pL2ClusterAlloc->idxL1 = idxL1;
                        pL2ClusterAlloc->idxL2 = idxL2;
                        pL2ClusterAlloc->cbToWrite = cbToWrite;
                        pL2ClusterAlloc->pL2Entry = pL2Entry;

                        /*
                         * Write the L2 table first and link to the L1 table afterwards.
                         * If something unexpected happens the worst case which can happen
                         * is a leak of some clusters.
                         */
                        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pImage->pStorage,
                                                    offL2Tbl, pL2Entry->paL2Tbl, pImage->cbTable, pIoCtx,
                                                    qedAsyncClusterAllocUpdate, pL2ClusterAlloc);
                        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                            break;
                        else if (RT_FAILURE(rc))
                        {
                            RTMemFree(pL2ClusterAlloc);
                            qedL2TblCacheEntryFree(pImage, pL2Entry);
                            break;
                        }

                        /* Synchronous completion: advance the state machine by hand. */
                        rc = qedAsyncClusterAllocUpdate(pImage, pIoCtx, pL2ClusterAlloc, rc);
                    }
                    else
                    {
                        /* L2 table exists; fetch it (possibly from disk) into the cache. */
                        rc = qedL2TblCacheFetchAsync(pImage, pIoCtx, pImage->paL1Table[idxL1],
                                                     &pL2Entry);

                        if (RT_SUCCESS(rc))
                        {
                            PQEDCLUSTERASYNCALLOC pDataClusterAlloc = NULL;

                            /* Allocate new async cluster allocation state. */
                            pDataClusterAlloc = (PQEDCLUSTERASYNCALLOC)RTMemAllocZ(sizeof(QEDCLUSTERASYNCALLOC));
                            if (RT_UNLIKELY(!pDataClusterAlloc))
                            {
                                rc = VERR_NO_MEMORY;
                                break;
                            }

                            /* Allocate new cluster for the data. */
                            uint64_t offData = qedClusterAllocate(pImage, 1);

                            pDataClusterAlloc->enmAllocState = QEDCLUSTERASYNCALLOCSTATE_USER_ALLOC;
                            pDataClusterAlloc->cbImageOld = offData;
                            pDataClusterAlloc->offClusterNew = offData;
                            pDataClusterAlloc->idxL1 = idxL1;
                            pDataClusterAlloc->idxL2 = idxL2;
                            pDataClusterAlloc->cbToWrite = cbToWrite;
                            pDataClusterAlloc->pL2Entry = pL2Entry;

                            /* Write data. */
                            rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pImage->pStorage,
                                                        offData, pIoCtx, cbToWrite,
                                                        qedAsyncClusterAllocUpdate, pDataClusterAlloc);
                            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
                                break;
                            else if (RT_FAILURE(rc))
                            {
                                RTMemFree(pDataClusterAlloc);
                                break;
                            }

                            /* Synchronous completion: advance the state machine by hand. */
                            rc = qedAsyncClusterAllocUpdate(pImage, pIoCtx, pDataClusterAlloc, rc);
                        }
                    }

                } while (0);

                *pcbPreRead = 0;
                *pcbPostRead = 0;
            }
            else
            {
                /* Trying to do a partial write to an unallocated cluster. Don't do
                 * anything except letting the upper layer know what to do. */
                *pcbPreRead = offCluster;
                *pcbPostRead = pImage->cbCluster - cbToWrite - *pcbPreRead;
            }
        }

        if (pcbWriteProcess)
            *pcbWriteProcess = cbToWrite;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
1861
1862/** @copydoc VDIMAGEBACKEND::pfnFlush */
1863static DECLCALLBACK(int) qedFlush(void *pBackendData, PVDIOCTX pIoCtx)
1864{
1865 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1866 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1867 int rc = VINF_SUCCESS;
1868
1869 AssertPtr(pImage);
1870 AssertPtrReturn(pIoCtx, VERR_INVALID_PARAMETER);
1871
1872 if ( pImage->pStorage
1873 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
1874 {
1875 QedHeader Header;
1876
1877 Assert(!(pImage->cbTable % pImage->cbCluster));
1878 rc = qedTblWrite(pImage, pIoCtx, pImage->offL1Table, pImage->paL1Table,
1879 NULL, NULL);
1880 if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1881 {
1882 /* Write header. */
1883 qedHdrConvertFromHostEndianess(pImage, &Header);
1884 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pImage->pStorage,
1885 0, &Header, sizeof(Header),
1886 pIoCtx, NULL, NULL);
1887 if (RT_SUCCESS(rc) || rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
1888 rc = vdIfIoIntFileFlush(pImage->pIfIo, pImage->pStorage,
1889 pIoCtx, NULL, NULL);
1890 }
1891 }
1892
1893 LogFlowFunc(("returns %Rrc\n", rc));
1894 return rc;
1895}
1896
1897/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
1898static DECLCALLBACK(unsigned) qedGetVersion(void *pBackendData)
1899{
1900 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1901 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1902
1903 AssertPtrReturn(pImage, 0);
1904
1905 return 1;
1906}
1907
1908/** @copydoc VDIMAGEBACKEND::pfnGetSectorSize */
1909static DECLCALLBACK(uint32_t) qedGetSectorSize(void *pBackendData)
1910{
1911 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1912 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1913 uint32_t cb = 0;
1914
1915 AssertPtrReturn(pImage, 0);
1916
1917 if (pImage->pStorage)
1918 cb = 512;
1919
1920 LogFlowFunc(("returns %u\n", cb));
1921 return cb;
1922}
1923
1924/** @copydoc VDIMAGEBACKEND::pfnGetSize */
1925static DECLCALLBACK(uint64_t) qedGetSize(void *pBackendData)
1926{
1927 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1928 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1929 uint64_t cb = 0;
1930
1931 AssertPtrReturn(pImage, 0);
1932
1933 if (pImage->pStorage)
1934 cb = pImage->cbSize;
1935
1936 LogFlowFunc(("returns %llu\n", cb));
1937 return cb;
1938}
1939
1940/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
1941static DECLCALLBACK(uint64_t) qedGetFileSize(void *pBackendData)
1942{
1943 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1944 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1945 uint64_t cb = 0;
1946
1947 AssertPtrReturn(pImage, 0);
1948
1949 uint64_t cbFile;
1950 if (pImage->pStorage)
1951 {
1952 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pStorage, &cbFile);
1953 if (RT_SUCCESS(rc))
1954 cb += cbFile;
1955 }
1956
1957 LogFlowFunc(("returns %lld\n", cb));
1958 return cb;
1959}
1960
1961/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
1962static DECLCALLBACK(int) qedGetPCHSGeometry(void *pBackendData,
1963 PVDGEOMETRY pPCHSGeometry)
1964{
1965 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
1966 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1967 int rc = VINF_SUCCESS;
1968
1969 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
1970
1971 if (pImage->PCHSGeometry.cCylinders)
1972 *pPCHSGeometry = pImage->PCHSGeometry;
1973 else
1974 rc = VERR_VD_GEOMETRY_NOT_SET;
1975
1976 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
1977 return rc;
1978}
1979
1980/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
1981static DECLCALLBACK(int) qedSetPCHSGeometry(void *pBackendData,
1982 PCVDGEOMETRY pPCHSGeometry)
1983{
1984 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
1985 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
1986 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
1987 int rc = VINF_SUCCESS;
1988
1989 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
1990
1991 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
1992 rc = VERR_VD_IMAGE_READ_ONLY;
1993 else
1994 pImage->PCHSGeometry = *pPCHSGeometry;
1995
1996 LogFlowFunc(("returns %Rrc\n", rc));
1997 return rc;
1998}
1999
2000/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
2001static DECLCALLBACK(int) qedGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
2002{
2003 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
2004 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2005 int rc = VINF_SUCCESS;
2006
2007 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2008
2009 if (pImage->LCHSGeometry.cCylinders)
2010 *pLCHSGeometry = pImage->LCHSGeometry;
2011 else
2012 rc = VERR_VD_GEOMETRY_NOT_SET;
2013
2014 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders,
2015 pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
2016 return rc;
2017}
2018
2019/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
2020static DECLCALLBACK(int) qedSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
2021{
2022 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData,
2023 pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
2024 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2025 int rc = VINF_SUCCESS;
2026
2027 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2028
2029 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2030 rc = VERR_VD_IMAGE_READ_ONLY;
2031 else
2032 pImage->LCHSGeometry = *pLCHSGeometry;
2033
2034 LogFlowFunc(("returns %Rrc\n", rc));
2035 return rc;
2036}
2037
2038/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
2039static DECLCALLBACK(unsigned) qedGetImageFlags(void *pBackendData)
2040{
2041 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
2042 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2043
2044 AssertPtrReturn(pImage, 0);
2045
2046 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
2047 return pImage->uImageFlags;
2048}
2049
2050/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
2051static DECLCALLBACK(unsigned) qedGetOpenFlags(void *pBackendData)
2052{
2053 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
2054 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2055
2056 AssertPtrReturn(pImage, 0);
2057
2058 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
2059 return pImage->uOpenFlags;
2060}
2061
2062/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
2063static DECLCALLBACK(int) qedSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
2064{
2065 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
2066 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2067 int rc = VINF_SUCCESS;
2068
2069 /* Image must be opened and the new flags must be valid. */
2070 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
2071 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
2072 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
2073 rc = VERR_INVALID_PARAMETER;
2074 else
2075 {
2076 /* Implement this operation via reopening the image. */
2077 rc = qedFreeImage(pImage, false);
2078 if (RT_SUCCESS(rc))
2079 rc = qedOpenImage(pImage, uOpenFlags);
2080 }
2081
2082 LogFlowFunc(("returns %Rrc\n", rc));
2083 return rc;
2084}
2085
2086/** @copydoc VDIMAGEBACKEND::pfnGetComment */
2087static DECLCALLBACK(int) qedGetComment(void *pBackendData, char *pszComment,
2088 size_t cbComment)
2089{
2090 RT_NOREF2(pszComment, cbComment);
2091 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
2092 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2093
2094 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2095
2096 LogFlowFunc(("returns %Rrc comment='%s'\n", VERR_NOT_SUPPORTED, pszComment));
2097 return VERR_NOT_SUPPORTED;
2098}
2099
2100/** @copydoc VDIMAGEBACKEND::pfnSetComment */
2101static DECLCALLBACK(int) qedSetComment(void *pBackendData, const char *pszComment)
2102{
2103 RT_NOREF1(pszComment);
2104 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
2105 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2106
2107 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2108
2109 int rc;
2110 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2111 rc = VERR_VD_IMAGE_READ_ONLY;
2112 else
2113 rc = VERR_NOT_SUPPORTED;
2114
2115 LogFlowFunc(("returns %Rrc\n", rc));
2116 return rc;
2117}
2118
2119/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
2120static DECLCALLBACK(int) qedGetUuid(void *pBackendData, PRTUUID pUuid)
2121{
2122 RT_NOREF1(pUuid);
2123 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
2124 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2125
2126 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2127
2128 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VERR_NOT_SUPPORTED, pUuid));
2129 return VERR_NOT_SUPPORTED;
2130}
2131
2132/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
2133static DECLCALLBACK(int) qedSetUuid(void *pBackendData, PCRTUUID pUuid)
2134{
2135 RT_NOREF1(pUuid);
2136 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
2137 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2138
2139 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2140
2141 int rc;
2142 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2143 rc = VERR_VD_IMAGE_READ_ONLY;
2144 else
2145 rc = VERR_NOT_SUPPORTED;
2146
2147 LogFlowFunc(("returns %Rrc\n", rc));
2148 return rc;
2149}
2150
2151/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
2152static DECLCALLBACK(int) qedGetModificationUuid(void *pBackendData, PRTUUID pUuid)
2153{
2154 RT_NOREF1(pUuid);
2155 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
2156 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2157
2158 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2159
2160 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VERR_NOT_SUPPORTED, pUuid));
2161 return VERR_NOT_SUPPORTED;
2162}
2163
2164/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
2165static DECLCALLBACK(int) qedSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
2166{
2167 RT_NOREF1(pUuid);
2168 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
2169 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2170
2171 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2172
2173 int rc;
2174 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2175 rc = VERR_VD_IMAGE_READ_ONLY;
2176 else
2177 rc = VERR_NOT_SUPPORTED;
2178
2179 LogFlowFunc(("returns %Rrc\n", rc));
2180 return rc;
2181}
2182
2183/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
2184static DECLCALLBACK(int) qedGetParentUuid(void *pBackendData, PRTUUID pUuid)
2185{
2186 RT_NOREF1(pUuid);
2187 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
2188 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2189
2190 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2191
2192 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VERR_NOT_SUPPORTED, pUuid));
2193 return VERR_NOT_SUPPORTED;
2194}
2195
2196/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
2197static DECLCALLBACK(int) qedSetParentUuid(void *pBackendData, PCRTUUID pUuid)
2198{
2199 RT_NOREF1(pUuid);
2200 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
2201 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2202
2203 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2204
2205 int rc;
2206 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2207 rc = VERR_VD_IMAGE_READ_ONLY;
2208 else
2209 rc = VERR_NOT_SUPPORTED;
2210
2211 LogFlowFunc(("returns %Rrc\n", rc));
2212 return rc;
2213}
2214
2215/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
2216static DECLCALLBACK(int) qedGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
2217{
2218 RT_NOREF1(pUuid);
2219 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
2220 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2221
2222 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2223
2224 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VERR_NOT_SUPPORTED, pUuid));
2225 return VERR_NOT_SUPPORTED;
2226}
2227
2228/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
2229static DECLCALLBACK(int) qedSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
2230{
2231 RT_NOREF1(pUuid);
2232 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
2233 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2234
2235 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2236
2237 int rc;
2238 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2239 rc = VERR_VD_IMAGE_READ_ONLY;
2240 else
2241 rc = VERR_NOT_SUPPORTED;
2242
2243 LogFlowFunc(("returns %Rrc\n", rc));
2244 return rc;
2245}
2246
2247/** @copydoc VDIMAGEBACKEND::pfnDump */
2248static DECLCALLBACK(void) qedDump(void *pBackendData)
2249{
2250 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2251
2252 AssertPtrReturnVoid(pImage);
2253 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
2254 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
2255 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
2256 pImage->cbSize / 512);
2257}
2258
2259/** @copydoc VDIMAGEBACKEND::pfnGetParentFilename */
2260static DECLCALLBACK(int) qedGetParentFilename(void *pBackendData, char **ppszParentFilename)
2261{
2262 int rc = VINF_SUCCESS;
2263 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2264
2265 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
2266
2267 if (pImage->pszBackingFilename)
2268 *ppszParentFilename = RTStrDup(pImage->pszBackingFilename);
2269 else
2270 rc = VERR_NOT_SUPPORTED;
2271
2272 LogFlowFunc(("returns %Rrc\n", rc));
2273 return rc;
2274}
2275
/** @copydoc VDIMAGEBACKEND::pfnSetParentFilename */
static DECLCALLBACK(int) qedSetParentFilename(void *pBackendData, const char *pszParentFilename)
{
    int rc = VINF_SUCCESS;
    PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;

    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
        rc = VERR_VD_IMAGE_READ_ONLY;
    else if (   pImage->pszBackingFilename
             && (strlen(pszParentFilename) > pImage->cbBackingFilename))
        rc = VERR_NOT_SUPPORTED; /* The new filename is longer than the old one. */
    else
    {
        /* Replace the cached copy of the backing filename. */
        if (pImage->pszBackingFilename)
            RTStrFree(pImage->pszBackingFilename);
        pImage->pszBackingFilename = RTStrDup(pszParentFilename);
        if (!pImage->pszBackingFilename)
            rc = VERR_NO_MEMORY;
        else
        {
            if (!pImage->offBackingFilename)
            {
                /* Allocate new cluster. */
                uint64_t offData = qedClusterAllocate(pImage, 1);

                /* NOTE(review): qedClusterAllocate failure is not checked here;
                 * confirm it cannot fail for a single cluster in this path. */
                Assert((offData & UINT32_MAX) == offData);
                pImage->offBackingFilename = (uint32_t)offData;
                pImage->cbBackingFilename  = (uint32_t)strlen(pszParentFilename);
                rc = vdIfIoIntFileSetSize(pImage->pIfIo, pImage->pStorage,
                                          offData + pImage->cbCluster);
            }

            /* Write the name into the (new or previously allocated) cluster.
             * NOTE(review): when reusing the old cluster with a shorter name,
             * cbBackingFilename keeps the old length — verify whether the
             * header length field should be updated here as well. */
            if (RT_SUCCESS(rc))
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage,
                                            pImage->offBackingFilename,
                                            pImage->pszBackingFilename,
                                            strlen(pImage->pszBackingFilename));
        }
    }

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
2321
2322/** @copydoc VDIMAGEBACKEND::pfnResize */
2323static DECLCALLBACK(int) qedResize(void *pBackendData, uint64_t cbSize,
2324 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
2325 unsigned uPercentStart, unsigned uPercentSpan,
2326 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
2327 PVDINTERFACE pVDIfsOperation)
2328{
2329 RT_NOREF7(pPCHSGeometry, pLCHSGeometry, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
2330 PQEDIMAGE pImage = (PQEDIMAGE)pBackendData;
2331 int rc = VINF_SUCCESS;
2332
2333 /* Making the image smaller is not supported at the moment. */
2334 if (cbSize < pImage->cbSize)
2335 rc = VERR_NOT_SUPPORTED;
2336 else if (cbSize > pImage->cbSize)
2337 {
2338 /*
2339 * It is enough to just update the size field in the header to complete
2340 * growing. With the default cluster and table sizes the image can be expanded
2341 * to 64TB without overflowing the L1 and L2 tables making block relocation
2342 * superfluous.
2343 * @todo: The rare case where block relocation is still required (non default
2344 * table and/or cluster size or images with more than 64TB) is not
2345 * implemented yet and resizing such an image will fail with an error.
2346 */
2347 if (qedByte2Cluster(pImage, pImage->cbTable)*pImage->cTableEntries*pImage->cTableEntries*pImage->cbCluster < cbSize)
2348 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS,
2349 N_("Qed: Resizing the image '%s' is not supported because it would overflow the L1 and L2 table\n"),
2350 pImage->pszFilename);
2351 else
2352 {
2353 uint64_t cbSizeOld = pImage->cbSize;
2354
2355 pImage->cbSize = cbSize;
2356 rc = qedFlushImage(pImage);
2357 if (RT_FAILURE(rc))
2358 {
2359 pImage->cbSize = cbSizeOld; /* Restore */
2360
2361 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("Qed: Resizing the image '%s' failed\n"),
2362 pImage->pszFilename);
2363 }
2364 }
2365 }
2366 /* Same size doesn't change the image at all. */
2367
2368 LogFlowFunc(("returns %Rrc\n", rc));
2369 return rc;
2370}
2371
2372
/**
 * The QED image backend method table registered with the VD storage layer.
 * Field order must match the VDIMAGEBACKEND structure declaration; NULL
 * entries mark optional operations this backend does not implement.
 */
const VDIMAGEBACKEND g_QedBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "QED",
    /* uBackendCaps */
    VD_CAP_FILE | VD_CAP_VFS | VD_CAP_CREATE_DYNAMIC | VD_CAP_DIFF | VD_CAP_ASYNC,
    /* paFileExtensions */
    s_aQedFileExtensions,
    /* paConfigInfo - no backend-specific configuration keys */
    NULL,
    /* pfnProbe */
    qedProbe,
    /* pfnOpen */
    qedOpen,
    /* pfnCreate */
    qedCreate,
    /* pfnRename */
    qedRename,
    /* pfnClose */
    qedClose,
    /* pfnRead */
    qedRead,
    /* pfnWrite */
    qedWrite,
    /* pfnFlush */
    qedFlush,
    /* pfnDiscard - not supported */
    NULL,
    /* pfnGetVersion */
    qedGetVersion,
    /* pfnGetSectorSize */
    qedGetSectorSize,
    /* pfnGetSize */
    qedGetSize,
    /* pfnGetFileSize */
    qedGetFileSize,
    /* pfnGetPCHSGeometry */
    qedGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    qedSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    qedGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    qedSetLCHSGeometry,
    /* pfnGetImageFlags */
    qedGetImageFlags,
    /* pfnGetOpenFlags */
    qedGetOpenFlags,
    /* pfnSetOpenFlags */
    qedSetOpenFlags,
    /* pfnGetComment */
    qedGetComment,
    /* pfnSetComment */
    qedSetComment,
    /* pfnGetUuid */
    qedGetUuid,
    /* pfnSetUuid */
    qedSetUuid,
    /* pfnGetModificationUuid */
    qedGetModificationUuid,
    /* pfnSetModificationUuid */
    qedSetModificationUuid,
    /* pfnGetParentUuid */
    qedGetParentUuid,
    /* pfnSetParentUuid */
    qedSetParentUuid,
    /* pfnGetParentModificationUuid */
    qedGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    qedSetParentModificationUuid,
    /* pfnDump */
    qedDump,
    /* pfnGetTimestamp - not supported */
    NULL,
    /* pfnGetParentTimestamp - not supported */
    NULL,
    /* pfnSetParentTimestamp - not supported */
    NULL,
    /* pfnGetParentFilename */
    qedGetParentFilename,
    /* pfnSetParentFilename */
    qedSetParentFilename,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact - not supported */
    NULL,
    /* pfnResize */
    qedResize,
    /* pfnRepair - not supported */
    NULL,
    /* pfnTraverseMetadata - not supported */
    NULL,
    /* u32Version - trailing structure version marker, must match the leading one */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette