VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 64213

Last change on this file since 64213 was 63905, checked in by vboxsync, 8 years ago

Storage/VD: Add proper versioning of the backend structures instead of just relying on the structure size to make changing callback signatures possible in the future and still being able to reject incompatible plugins

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 252.3 KB
Line 
1/* $Id: VMDK.cpp 63905 2016-09-20 08:31:05Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD_VMDK
23#include <VBox/vd-plugin.h>
24#include <VBox/err.h>
25
26#include <VBox/log.h>
27#include <iprt/assert.h>
28#include <iprt/alloc.h>
29#include <iprt/uuid.h>
30#include <iprt/path.h>
31#include <iprt/string.h>
32#include <iprt/rand.h>
33#include <iprt/zip.h>
34#include <iprt/asm.h>
35
36#include "VDBackends.h"
37
38
39/*********************************************************************************************************************************
40* Constants And Macros, Structures and Typedefs *
41*********************************************************************************************************************************/
42
43/** Maximum encoded string size (including NUL) we allow for VMDK images.
44 * Deliberately not set high to avoid running out of descriptor space. */
45#define VMDK_ENCODED_COMMENT_MAX 1024
46
47/** VMDK descriptor DDB entry for PCHS cylinders. */
48#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
49
50/** VMDK descriptor DDB entry for PCHS heads. */
51#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
52
53/** VMDK descriptor DDB entry for PCHS sectors. */
54#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
55
56/** VMDK descriptor DDB entry for LCHS cylinders. */
57#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
58
59/** VMDK descriptor DDB entry for LCHS heads. */
60#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
61
62/** VMDK descriptor DDB entry for LCHS sectors. */
63#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
64
65/** VMDK descriptor DDB entry for image UUID. */
66#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
67
68/** VMDK descriptor DDB entry for image modification UUID. */
69#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
70
71/** VMDK descriptor DDB entry for parent image UUID. */
72#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
73
74/** VMDK descriptor DDB entry for parent image modification UUID. */
75#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
76
77/** No compression for streamOptimized files. */
78#define VMDK_COMPRESSION_NONE 0
79
80/** Deflate compression for streamOptimized files. */
81#define VMDK_COMPRESSION_DEFLATE 1
82
83/** Marker that the actual GD value is stored in the footer. */
84#define VMDK_GD_AT_END 0xffffffffffffffffULL
85
86/** Marker for end-of-stream in streamOptimized images. */
87#define VMDK_MARKER_EOS 0
88
89/** Marker for grain table block in streamOptimized images. */
90#define VMDK_MARKER_GT 1
91
92/** Marker for grain directory block in streamOptimized images. */
93#define VMDK_MARKER_GD 2
94
95/** Marker for footer in streamOptimized images. */
96#define VMDK_MARKER_FOOTER 3
97
98/** Marker for unknown purpose in streamOptimized images.
99 * Shows up in very recent images created by vSphere, but only sporadically.
100 * They "forgot" to document that one in the VMDK specification. */
101#define VMDK_MARKER_UNSPECIFIED 4
102
103/** Dummy marker for "don't check the marker value". */
104#define VMDK_MARKER_IGNORE 0xffffffffU
105
106/**
107 * Magic number for hosted images created by VMware Workstation 4, VMware
108 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
109 */
110#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
111
112/**
113 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
114 * this header is also used for monolithic flat images.
115 */
116#pragma pack(1)
117typedef struct SparseExtentHeader
118{
119 uint32_t magicNumber;
120 uint32_t version;
121 uint32_t flags;
122 uint64_t capacity;
123 uint64_t grainSize;
124 uint64_t descriptorOffset;
125 uint64_t descriptorSize;
126 uint32_t numGTEsPerGT;
127 uint64_t rgdOffset;
128 uint64_t gdOffset;
129 uint64_t overHead;
130 bool uncleanShutdown;
131 char singleEndLineChar;
132 char nonEndLineChar;
133 char doubleEndLineChar1;
134 char doubleEndLineChar2;
135 uint16_t compressAlgorithm;
136 uint8_t pad[433];
137} SparseExtentHeader;
138#pragma pack()
139
140/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
141 * divisible by the default grain size (64K) */
142#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
143
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Logical sector number of the grain this marker precedes (for data
     * grains); stored little endian on disk. */
    uint64_t uSector;
    /** Size of the compressed data in bytes; 0 indicates a metadata marker
     * (per the VMDK specification). */
    uint32_t cbSize;
    /** Marker type, one of the VMDK_MARKER_* values (only meaningful for
     * metadata markers). */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
154
155
156/** Convert sector number/size to byte offset/size. */
157#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
158
159/** Convert byte offset/size to sector number/size. */
160#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
161
162/**
163 * VMDK extent type.
164 */
165typedef enum VMDKETYPE
166{
167 /** Hosted sparse extent. */
168 VMDKETYPE_HOSTED_SPARSE = 1,
169 /** Flat extent. */
170 VMDKETYPE_FLAT,
171 /** Zero extent. */
172 VMDKETYPE_ZERO,
173 /** VMFS extent, used by ESX. */
174 VMDKETYPE_VMFS
175} VMDKETYPE, *PVMDKETYPE;
176
177/**
178 * VMDK access type for a extent.
179 */
180typedef enum VMDKACCESS
181{
182 /** No access allowed. */
183 VMDKACCESS_NOACCESS = 0,
184 /** Read-only access. */
185 VMDKACCESS_READONLY,
186 /** Read-write access. */
187 VMDKACCESS_READWRITE
188} VMDKACCESS, *PVMDKACCESS;
189
190/** Forward declaration for PVMDKIMAGE. */
191typedef struct VMDKIMAGE *PVMDKIMAGE;
192
193/**
194 * Extents files entry. Used for opening a particular file only once.
195 */
196typedef struct VMDKFILE
197{
198 /** Pointer to filename. Local copy. */
199 const char *pszFilename;
200 /** File open flags for consistency checking. */
201 unsigned fOpen;
202 /** Handle for sync/async file abstraction.*/
203 PVDIOSTORAGE pStorage;
204 /** Reference counter. */
205 unsigned uReferences;
206 /** Flag whether the file should be deleted on last close. */
207 bool fDelete;
208 /** Pointer to the image we belong to (for debugging purposes). */
209 PVMDKIMAGE pImage;
210 /** Pointer to next file descriptor. */
211 struct VMDKFILE *pNext;
212 /** Pointer to the previous file descriptor. */
213 struct VMDKFILE *pPrev;
214} VMDKFILE, *PVMDKFILE;
215
216/**
217 * VMDK extent data structure.
218 */
219typedef struct VMDKEXTENT
220{
221 /** File handle. */
222 PVMDKFILE pFile;
223 /** Base name of the image extent. */
224 const char *pszBasename;
225 /** Full name of the image extent. */
226 const char *pszFullname;
227 /** Number of sectors in this extent. */
228 uint64_t cSectors;
229 /** Number of sectors per block (grain in VMDK speak). */
230 uint64_t cSectorsPerGrain;
231 /** Starting sector number of descriptor. */
232 uint64_t uDescriptorSector;
233 /** Size of descriptor in sectors. */
234 uint64_t cDescriptorSectors;
235 /** Starting sector number of grain directory. */
236 uint64_t uSectorGD;
237 /** Starting sector number of redundant grain directory. */
238 uint64_t uSectorRGD;
239 /** Total number of metadata sectors. */
240 uint64_t cOverheadSectors;
241 /** Nominal size (i.e. as described by the descriptor) of this extent. */
242 uint64_t cNominalSectors;
243 /** Sector offset (i.e. as described by the descriptor) of this extent. */
244 uint64_t uSectorOffset;
245 /** Number of entries in a grain table. */
246 uint32_t cGTEntries;
247 /** Number of sectors reachable via a grain directory entry. */
248 uint32_t cSectorsPerGDE;
249 /** Number of entries in the grain directory. */
250 uint32_t cGDEntries;
251 /** Pointer to the next free sector. Legacy information. Do not use. */
252 uint32_t uFreeSector;
253 /** Number of this extent in the list of images. */
254 uint32_t uExtent;
255 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
256 char *pDescData;
257 /** Pointer to the grain directory. */
258 uint32_t *pGD;
259 /** Pointer to the redundant grain directory. */
260 uint32_t *pRGD;
261 /** VMDK version of this extent. 1=1.0/1.1 */
262 uint32_t uVersion;
263 /** Type of this extent. */
264 VMDKETYPE enmType;
265 /** Access to this extent. */
266 VMDKACCESS enmAccess;
267 /** Flag whether this extent is marked as unclean. */
268 bool fUncleanShutdown;
269 /** Flag whether the metadata in the extent header needs to be updated. */
270 bool fMetaDirty;
271 /** Flag whether there is a footer in this extent. */
272 bool fFooter;
273 /** Compression type for this extent. */
274 uint16_t uCompression;
275 /** Append position for writing new grain. Only for sparse extents. */
276 uint64_t uAppendPosition;
277 /** Last grain which was accessed. Only for streamOptimized extents. */
278 uint32_t uLastGrainAccess;
279 /** Starting sector corresponding to the grain buffer. */
280 uint32_t uGrainSectorAbs;
281 /** Grain number corresponding to the grain buffer. */
282 uint32_t uGrain;
283 /** Actual size of the compressed data, only valid for reading. */
284 uint32_t cbGrainStreamRead;
285 /** Size of compressed grain buffer for streamOptimized extents. */
286 size_t cbCompGrain;
287 /** Compressed grain buffer for streamOptimized extents, with marker. */
288 void *pvCompGrain;
289 /** Decompressed grain buffer for streamOptimized extents. */
290 void *pvGrain;
291 /** Reference to the image in which this extent is used. Do not use this
292 * on a regular basis to avoid passing pImage references to functions
293 * explicitly. */
294 struct VMDKIMAGE *pImage;
295} VMDKEXTENT, *PVMDKEXTENT;
296
297/**
298 * Grain table cache size. Allocated per image.
299 */
300#define VMDK_GT_CACHE_SIZE 256
301
302/**
303 * Grain table block size. Smaller than an actual grain table block to allow
304 * more grain table blocks to be cached without having to allocate excessive
305 * amounts of memory for the cache.
306 */
307#define VMDK_GT_CACHELINE_SIZE 128
308
309
310/**
311 * Maximum number of lines in a descriptor file. Not worth the effort of
312 * making it variable. Descriptor files are generally very short (~20 lines),
313 * with the exception of sparse files split in 2G chunks, which need for the
314 * maximum size (almost 2T) exactly 1025 lines for the disk database.
315 */
316#define VMDK_DESCRIPTOR_LINES_MAX 1100U
317
318/**
319 * Parsed descriptor information. Allows easy access and update of the
320 * descriptor (whether separate file or not). Free form text files suck.
321 */
322typedef struct VMDKDESCRIPTOR
323{
324 /** Line number of first entry of the disk descriptor. */
325 unsigned uFirstDesc;
326 /** Line number of first entry in the extent description. */
327 unsigned uFirstExtent;
328 /** Line number of first disk database entry. */
329 unsigned uFirstDDB;
330 /** Total number of lines. */
331 unsigned cLines;
332 /** Total amount of memory available for the descriptor. */
333 size_t cbDescAlloc;
334 /** Set if descriptor has been changed and not yet written to disk. */
335 bool fDirty;
336 /** Array of pointers to the data in the descriptor. */
337 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
338 /** Array of line indices pointing to the next non-comment line. */
339 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
340} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
341
342
343/**
344 * Cache entry for translating extent/sector to a sector number in that
345 * extent.
346 */
347typedef struct VMDKGTCACHEENTRY
348{
349 /** Extent number for which this entry is valid. */
350 uint32_t uExtent;
351 /** GT data block number. */
352 uint64_t uGTBlock;
353 /** Data part of the cache entry. */
354 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
355} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
356
357/**
358 * Cache data structure for blocks of grain table entries. For now this is a
359 * fixed size direct mapping cache, but this should be adapted to the size of
360 * the sparse image and maybe converted to a set-associative cache. The
361 * implementation below implements a write-through cache with write allocate.
362 */
363typedef struct VMDKGTCACHE
364{
365 /** Cache entries. */
366 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
367 /** Number of cache entries (currently unused). */
368 unsigned cEntries;
369} VMDKGTCACHE, *PVMDKGTCACHE;
370
371/**
372 * Complete VMDK image data structure. Mainly a collection of extents and a few
373 * extra global data fields.
374 */
375typedef struct VMDKIMAGE
376{
377 /** Image name. */
378 const char *pszFilename;
379 /** Descriptor file if applicable. */
380 PVMDKFILE pFile;
381
382 /** Pointer to the per-disk VD interface list. */
383 PVDINTERFACE pVDIfsDisk;
384 /** Pointer to the per-image VD interface list. */
385 PVDINTERFACE pVDIfsImage;
386
387 /** Error interface. */
388 PVDINTERFACEERROR pIfError;
389 /** I/O interface. */
390 PVDINTERFACEIOINT pIfIo;
391
392
393 /** Pointer to the image extents. */
394 PVMDKEXTENT pExtents;
395 /** Number of image extents. */
396 unsigned cExtents;
397 /** Pointer to the files list, for opening a file referenced multiple
398 * times only once (happens mainly with raw partition access). */
399 PVMDKFILE pFiles;
400
401 /**
402 * Pointer to an array of segment entries for async I/O.
403 * This is an optimization because the task number to submit is not known
404 * and allocating/freeing an array in the read/write functions every time
405 * is too expensive.
406 */
407 PPDMDATASEG paSegments;
408 /** Entries available in the segments array. */
409 unsigned cSegments;
410
411 /** Open flags passed by VBoxHD layer. */
412 unsigned uOpenFlags;
413 /** Image flags defined during creation or determined during open. */
414 unsigned uImageFlags;
415 /** Total size of the image. */
416 uint64_t cbSize;
417 /** Physical geometry of this image. */
418 VDGEOMETRY PCHSGeometry;
419 /** Logical geometry of this image. */
420 VDGEOMETRY LCHSGeometry;
421 /** Image UUID. */
422 RTUUID ImageUuid;
423 /** Image modification UUID. */
424 RTUUID ModificationUuid;
425 /** Parent image UUID. */
426 RTUUID ParentUuid;
427 /** Parent image modification UUID. */
428 RTUUID ParentModificationUuid;
429
430 /** Pointer to grain table cache, if this image contains sparse extents. */
431 PVMDKGTCACHE pGTCache;
432 /** Pointer to the descriptor (NULL if no separate descriptor file). */
433 char *pDescData;
434 /** Allocation size of the descriptor file. */
435 size_t cbDescAlloc;
436 /** Parsed descriptor file content. */
437 VMDKDESCRIPTOR Descriptor;
438} VMDKIMAGE;
439
440
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position in the compressed grain buffer. Set to -1
     * before the first helper callback, which uses that as the signal to
     * inject/strip the RTZip stream type byte and then start at the offset
     * of the marker's uType field. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
453
454
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number the allocation is for. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent in which the allocation happens. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t uGrainOffset;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
476
477/**
478 * State information for vmdkRename() and helpers.
479 */
480typedef struct VMDKRENAMESTATE
481{
482 /** Array of old filenames. */
483 char **apszOldName;
484 /** Array of new filenames. */
485 char **apszNewName;
486 /** Array of new lines in the extent descriptor. */
487 char **apszNewLines;
488 /** Name of the old descriptor file if not a sparse image. */
489 char *pszOldDescName;
490 /** Flag whether we called vmdkFreeImage(). */
491 bool fImageFreed;
492 /** Flag whther the descriptor is embedded in the image (sparse) or
493 * in a separate file. */
494 bool fEmbeddedDesc;
495 /** Number of extents in the image. */
496 unsigned cExtents;
497 /** New base filename. */
498 char *pszNewBaseName;
499 /** The old base filename. */
500 char *pszOldBaseName;
501 /** New full filename. */
502 char *pszNewFullName;
503 /** Old full filename. */
504 char *pszOldFullName;
505 /** The old image name. */
506 const char *pszOldImageName;
507 /** Copy of the original VMDK descriptor. */
508 VMDKDESCRIPTOR DescriptorCopy;
509 /** Copy of the extent state for sparse images. */
510 VMDKEXTENT ExtentCopy;
511} VMDKRENAMESTATE;
512/** Pointer to a VMDK rename state. */
513typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
514
515/*********************************************************************************************************************************
516* Static Variables *
517*********************************************************************************************************************************/
518
519/** NULL-terminated array of supported file extensions. */
520static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
521{
522 {"vmdk", VDTYPE_HDD},
523 {NULL, VDTYPE_INVALID}
524};
525
526
527/*********************************************************************************************************************************
528* Internal Functions *
529*********************************************************************************************************************************/
530
531static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
532static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
533 bool fDelete);
534
535static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
536static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
537static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
538static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
539
540static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
541 void *pvUser, int rcReq);
542
543/**
544 * Internal: open a file (using a file descriptor cache to ensure each file
545 * is only opened once - anything else can cause locking problems).
546 */
547static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
548 const char *pszFilename, uint32_t fOpen)
549{
550 int rc = VINF_SUCCESS;
551 PVMDKFILE pVmdkFile;
552
553 for (pVmdkFile = pImage->pFiles;
554 pVmdkFile != NULL;
555 pVmdkFile = pVmdkFile->pNext)
556 {
557 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
558 {
559 Assert(fOpen == pVmdkFile->fOpen);
560 pVmdkFile->uReferences++;
561
562 *ppVmdkFile = pVmdkFile;
563
564 return rc;
565 }
566 }
567
568 /* If we get here, there's no matching entry in the cache. */
569 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
570 if (!pVmdkFile)
571 {
572 *ppVmdkFile = NULL;
573 return VERR_NO_MEMORY;
574 }
575
576 pVmdkFile->pszFilename = RTStrDup(pszFilename);
577 if (!pVmdkFile->pszFilename)
578 {
579 RTMemFree(pVmdkFile);
580 *ppVmdkFile = NULL;
581 return VERR_NO_MEMORY;
582 }
583 pVmdkFile->fOpen = fOpen;
584
585 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
586 &pVmdkFile->pStorage);
587 if (RT_SUCCESS(rc))
588 {
589 pVmdkFile->uReferences = 1;
590 pVmdkFile->pImage = pImage;
591 pVmdkFile->pNext = pImage->pFiles;
592 if (pImage->pFiles)
593 pImage->pFiles->pPrev = pVmdkFile;
594 pImage->pFiles = pVmdkFile;
595 *ppVmdkFile = pVmdkFile;
596 }
597 else
598 {
599 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
600 RTMemFree(pVmdkFile);
601 *ppVmdkFile = NULL;
602 }
603
604 return rc;
605}
606
607/**
608 * Internal: close a file, updating the file descriptor cache.
609 */
610static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
611{
612 int rc = VINF_SUCCESS;
613 PVMDKFILE pVmdkFile = *ppVmdkFile;
614
615 AssertPtr(pVmdkFile);
616
617 pVmdkFile->fDelete |= fDelete;
618 Assert(pVmdkFile->uReferences);
619 pVmdkFile->uReferences--;
620 if (pVmdkFile->uReferences == 0)
621 {
622 PVMDKFILE pPrev;
623 PVMDKFILE pNext;
624
625 /* Unchain the element from the list. */
626 pPrev = pVmdkFile->pPrev;
627 pNext = pVmdkFile->pNext;
628
629 if (pNext)
630 pNext->pPrev = pPrev;
631 if (pPrev)
632 pPrev->pNext = pNext;
633 else
634 pImage->pFiles = pNext;
635
636 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
637 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
638 rc = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
639 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
640 RTMemFree(pVmdkFile);
641 }
642
643 *ppVmdkFile = NULL;
644 return rc;
645}
646
647/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
648#ifndef VMDK_USE_BLOCK_DECOMP_API
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
    /* Input callback for RTZipDecompCreate(): feeds the compressed grain
     * buffer to the decompressor. On the very first call (iOffset < 0) a
     * RTZIPTYPE_ZLIB type byte is injected ahead of the data so the generic
     * RTZip reader accepts the raw zlib stream, and the read position is
     * then set past the marker header (offset of the uType field). */
    VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
    size_t cbInjected = 0;

    Assert(cbBuf);
    if (pInflateState->iOffset < 0)
    {
        /* First call: prepend the RTZip stream type byte. */
        *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
        pvBuf = (uint8_t *)pvBuf + 1;
        cbBuf--;
        cbInjected = 1;
        pInflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
    {
        if (pcbBuf)
            *pcbBuf = cbInjected;
        return VINF_SUCCESS;
    }
    /* Hand out as much of the remaining compressed data as fits. */
    cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
    memcpy(pvBuf,
           (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
           cbBuf);
    pInflateState->iOffset += cbBuf;
    Assert(pcbBuf);
    *pcbBuf = cbBuf + cbInjected;
    return VINF_SUCCESS;
}
678#endif
679
680/**
681 * Internal: read from a file and inflate the compressed data,
682 * distinguishing between async and normal operation
683 */
684DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
685 uint64_t uOffset, void *pvBuf,
686 size_t cbToRead, const void *pcvMarker,
687 uint64_t *puLBA, uint32_t *pcbMarkerData)
688{
689 int rc;
690#ifndef VMDK_USE_BLOCK_DECOMP_API
691 PRTZIPDECOMP pZip = NULL;
692#endif
693 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
694 size_t cbCompSize, cbActuallyRead;
695
696 if (!pcvMarker)
697 {
698 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
699 uOffset, pMarker, RT_OFFSETOF(VMDKMARKER, uType));
700 if (RT_FAILURE(rc))
701 return rc;
702 }
703 else
704 {
705 memcpy(pMarker, pcvMarker, RT_OFFSETOF(VMDKMARKER, uType));
706 /* pcvMarker endianness has already been partially transformed, fix it */
707 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
708 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
709 }
710
711 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
712 if (cbCompSize == 0)
713 {
714 AssertMsgFailed(("VMDK: corrupted marker\n"));
715 return VERR_VD_VMDK_INVALID_FORMAT;
716 }
717
718 /* Sanity check - the expansion ratio should be much less than 2. */
719 Assert(cbCompSize < 2 * cbToRead);
720 if (cbCompSize >= 2 * cbToRead)
721 return VERR_VD_VMDK_INVALID_FORMAT;
722
723 /* Compressed grain marker. Data follows immediately. */
724 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
725 uOffset + RT_OFFSETOF(VMDKMARKER, uType),
726 (uint8_t *)pExtent->pvCompGrain
727 + RT_OFFSETOF(VMDKMARKER, uType),
728 RT_ALIGN_Z( cbCompSize
729 + RT_OFFSETOF(VMDKMARKER, uType),
730 512)
731 - RT_OFFSETOF(VMDKMARKER, uType));
732
733 if (puLBA)
734 *puLBA = RT_LE2H_U64(pMarker->uSector);
735 if (pcbMarkerData)
736 *pcbMarkerData = RT_ALIGN( cbCompSize
737 + RT_OFFSETOF(VMDKMARKER, uType),
738 512);
739
740#ifdef VMDK_USE_BLOCK_DECOMP_API
741 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
742 pExtent->pvCompGrain, cbCompSize + RT_OFFSETOF(VMDKMARKER, uType), NULL,
743 pvBuf, cbToRead, &cbActuallyRead);
744#else
745 VMDKCOMPRESSIO InflateState;
746 InflateState.pImage = pImage;
747 InflateState.iOffset = -1;
748 InflateState.cbCompGrain = cbCompSize + RT_OFFSETOF(VMDKMARKER, uType);
749 InflateState.pvCompGrain = pExtent->pvCompGrain;
750
751 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
752 if (RT_FAILURE(rc))
753 return rc;
754 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
755 RTZipDecompDestroy(pZip);
756#endif /* !VMDK_USE_BLOCK_DECOMP_API */
757 if (RT_FAILURE(rc))
758 {
759 if (rc == VERR_ZIP_CORRUPTED)
760 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
761 return rc;
762 }
763 if (cbActuallyRead != cbToRead)
764 rc = VERR_VD_VMDK_INVALID_FORMAT;
765 return rc;
766}
767
static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
{
    /* Output callback for RTZipCompCreate(): collects the compressed data in
     * the extent's compressed grain buffer. The first byte produced by the
     * RTZip writer is the stream type byte (cf. vmdkFileInflateHelper which
     * re-injects it); it is not stored on disk, and writing starts past the
     * marker header (offset of the uType field) so the VMDKMARKER can be
     * filled in by the caller afterwards. */
    VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;

    Assert(cbBuf);
    if (pDeflateState->iOffset < 0)
    {
        /* First call: drop the RTZip stream type byte. */
        pvBuf = (const uint8_t *)pvBuf + 1;
        cbBuf--;
        pDeflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
        return VINF_SUCCESS;
    if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
        return VERR_BUFFER_OVERFLOW;
    memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
           pvBuf, cbBuf);
    pDeflateState->iOffset += cbBuf;
    return VINF_SUCCESS;
}
788
789/**
790 * Internal: deflate the uncompressed data and write to a file,
791 * distinguishing between async and normal operation
792 */
793DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
794 uint64_t uOffset, const void *pvBuf,
795 size_t cbToWrite, uint64_t uLBA,
796 uint32_t *pcbMarkerData)
797{
798 int rc;
799 PRTZIPCOMP pZip = NULL;
800 VMDKCOMPRESSIO DeflateState;
801
802 DeflateState.pImage = pImage;
803 DeflateState.iOffset = -1;
804 DeflateState.cbCompGrain = pExtent->cbCompGrain;
805 DeflateState.pvCompGrain = pExtent->pvCompGrain;
806
807 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
808 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
809 if (RT_FAILURE(rc))
810 return rc;
811 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
812 if (RT_SUCCESS(rc))
813 rc = RTZipCompFinish(pZip);
814 RTZipCompDestroy(pZip);
815 if (RT_SUCCESS(rc))
816 {
817 Assert( DeflateState.iOffset > 0
818 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
819
820 /* pad with zeroes to get to a full sector size */
821 uint32_t uSize = DeflateState.iOffset;
822 if (uSize % 512)
823 {
824 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
825 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
826 uSizeAlign - uSize);
827 uSize = uSizeAlign;
828 }
829
830 if (pcbMarkerData)
831 *pcbMarkerData = uSize;
832
833 /* Compressed grain marker. Data follows immediately. */
834 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
835 pMarker->uSector = RT_H2LE_U64(uLBA);
836 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
837 - RT_OFFSETOF(VMDKMARKER, uType));
838 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
839 uOffset, pMarker, uSize);
840 if (RT_FAILURE(rc))
841 return rc;
842 }
843 return rc;
844}
845
846
847/**
848 * Internal: check if all files are closed, prevent leaking resources.
849 */
850static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
851{
852 int rc = VINF_SUCCESS, rc2;
853 PVMDKFILE pVmdkFile;
854
855 Assert(pImage->pFiles == NULL);
856 for (pVmdkFile = pImage->pFiles;
857 pVmdkFile != NULL;
858 pVmdkFile = pVmdkFile->pNext)
859 {
860 LogRel(("VMDK: leaking reference to file \"%s\"\n",
861 pVmdkFile->pszFilename));
862 pImage->pFiles = pVmdkFile->pNext;
863
864 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
865
866 if (RT_SUCCESS(rc))
867 rc = rc2;
868 }
869 return rc;
870}
871
872/**
873 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
874 * critical non-ASCII characters.
875 */
876static char *vmdkEncodeString(const char *psz)
877{
878 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
879 char *pszDst = szEnc;
880
881 AssertPtr(psz);
882
883 for (; *psz; psz = RTStrNextCp(psz))
884 {
885 char *pszDstPrev = pszDst;
886 RTUNICP Cp = RTStrGetCp(psz);
887 if (Cp == '\\')
888 {
889 pszDst = RTStrPutCp(pszDst, Cp);
890 pszDst = RTStrPutCp(pszDst, Cp);
891 }
892 else if (Cp == '\n')
893 {
894 pszDst = RTStrPutCp(pszDst, '\\');
895 pszDst = RTStrPutCp(pszDst, 'n');
896 }
897 else if (Cp == '\r')
898 {
899 pszDst = RTStrPutCp(pszDst, '\\');
900 pszDst = RTStrPutCp(pszDst, 'r');
901 }
902 else
903 pszDst = RTStrPutCp(pszDst, Cp);
904 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
905 {
906 pszDst = pszDstPrev;
907 break;
908 }
909 }
910 *pszDst = '\0';
911 return RTStrDup(szEnc);
912}
913
914/**
915 * Internal: decode a string and store it into the specified string.
916 */
917static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
918{
919 int rc = VINF_SUCCESS;
920 char szBuf[4];
921
922 if (!cb)
923 return VERR_BUFFER_OVERFLOW;
924
925 AssertPtr(psz);
926
927 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
928 {
929 char *pszDst = szBuf;
930 RTUNICP Cp = RTStrGetCp(pszEncoded);
931 if (Cp == '\\')
932 {
933 pszEncoded = RTStrNextCp(pszEncoded);
934 RTUNICP CpQ = RTStrGetCp(pszEncoded);
935 if (CpQ == 'n')
936 RTStrPutCp(pszDst, '\n');
937 else if (CpQ == 'r')
938 RTStrPutCp(pszDst, '\r');
939 else if (CpQ == '\0')
940 {
941 rc = VERR_VD_VMDK_INVALID_HEADER;
942 break;
943 }
944 else
945 RTStrPutCp(pszDst, CpQ);
946 }
947 else
948 pszDst = RTStrPutCp(pszDst, Cp);
949
950 /* Need to leave space for terminating NUL. */
951 if ((size_t)(pszDst - szBuf) + 1 >= cb)
952 {
953 rc = VERR_BUFFER_OVERFLOW;
954 break;
955 }
956 memcpy(psz, szBuf, pszDst - szBuf);
957 psz += pszDst - szBuf;
958 }
959 *psz = '\0';
960 return rc;
961}
962
963/**
964 * Internal: free all buffers associated with grain directories.
965 */
966static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
967{
968 if (pExtent->pGD)
969 {
970 RTMemFree(pExtent->pGD);
971 pExtent->pGD = NULL;
972 }
973 if (pExtent->pRGD)
974 {
975 RTMemFree(pExtent->pRGD);
976 pExtent->pRGD = NULL;
977 }
978}
979
980/**
981 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
982 * images.
983 */
984static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
985{
986 int rc = VINF_SUCCESS;
987
988 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
989 {
990 /* streamOptimized extents need a compressed grain buffer, which must
991 * be big enough to hold uncompressible data (which needs ~8 bytes
992 * more than the uncompressed data), the marker and padding. */
993 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
994 + 8 + sizeof(VMDKMARKER), 512);
995 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
996 if (RT_LIKELY(pExtent->pvCompGrain))
997 {
998 /* streamOptimized extents need a decompressed grain buffer. */
999 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1000 if (!pExtent->pvGrain)
1001 rc = VERR_NO_MEMORY;
1002 }
1003 else
1004 rc = VERR_NO_MEMORY;
1005 }
1006
1007 if (RT_FAILURE(rc))
1008 vmdkFreeStreamBuffers(pExtent);
1009 return rc;
1010}
1011
1012/**
1013 * Internal: allocate all buffers associated with grain directories.
1014 */
1015static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1016{
1017 RT_NOREF1(pImage);
1018 int rc = VINF_SUCCESS;
1019 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1020
1021 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1022 if (RT_LIKELY(pExtent->pGD))
1023 {
1024 if (pExtent->uSectorRGD)
1025 {
1026 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1027 if (RT_UNLIKELY(!pExtent->pRGD))
1028 rc = VERR_NO_MEMORY;
1029 }
1030 }
1031 else
1032 rc = VERR_NO_MEMORY;
1033
1034 if (RT_FAILURE(rc))
1035 vmdkFreeGrainDirectory(pExtent);
1036 return rc;
1037}
1038
1039/**
1040 * Converts the grain directory from little to host endianess.
1041 *
1042 * @returns nothing.
1043 * @param pGD The grain directory.
1044 * @param cGDEntries Number of entries in the grain directory to convert.
1045 */
1046DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1047{
1048 uint32_t *pGDTmp = pGD;
1049
1050 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1051 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1052}
1053
/**
 * Read the grain directory and allocated grain tables verifying them against
 * their back up copies if available.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance data.
 * @param   pExtent     The VMDK extent.
 */
static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
    size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);

    /* Only hosted sparse extents with the grain directory at a known, fixed
     * location may come through here; VMDK_GD_AT_END marks the deferred
     * (streamOptimized) layout which is handled elsewhere. */
    AssertReturn((   pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
                  && pExtent->uSectorGD != VMDK_GD_AT_END
                  && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);

    rc = vmdkAllocGrainDirectory(pImage, pExtent);
    if (RT_SUCCESS(rc))
    {
        /* The VMDK 1.1 spec seems to talk about compressed grain directories,
         * but in reality they are not compressed. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorGD),
                                   pExtent->pGD, cbGD);
        if (RT_SUCCESS(rc))
        {
            vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);

            /* Cross-check against the redundant copy unless the caller asked
             * to skip consistency checking. */
            if (   pExtent->uSectorRGD
                && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
            {
                /* The VMDK 1.1 spec seems to talk about compressed grain directories,
                 * but in reality they are not compressed. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
                                           pExtent->pRGD, cbGD);
                if (RT_SUCCESS(rc))
                {
                    vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);

                    /* Check grain table and redundant grain table for consistency. */
                    size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
                    size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
                    size_t cbGTBuffersMax = _1M;

                    uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
                    uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);

                    /* NOTE(review): on allocation failure the loop below still
                     * iterates, but the RT_SUCCESS(rc) guards keep it from
                     * dereferencing the NULL buffers; it just runs to
                     * completion without doing I/O - confirm this is intended. */
                    if (   !pTmpGT1
                        || !pTmpGT2)
                        rc = VERR_NO_MEMORY;

                    size_t i = 0;
                    uint32_t *pGDTmp = pExtent->pGD;
                    uint32_t *pRGDTmp = pExtent->pRGD;

                    /* Loop through all entries. */
                    while (i < pExtent->cGDEntries)
                    {
                        uint32_t uGTStart = *pGDTmp;
                        uint32_t uRGTStart = *pRGDTmp;
                        size_t cbGTRead = cbGT;

                        /* If no grain table is allocated skip the entry. */
                        /* NOTE(review): i is advanced here without advancing
                         * pGDTmp/pRGDTmp, so after an unallocated entry the
                         * index and the pointers run out of sync and the
                         * remaining entries are not actually checked -
                         * verify against upstream. */
                        if (*pGDTmp == 0 && *pRGDTmp == 0)
                        {
                            i++;
                            continue;
                        }

                        if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                        {
                            /* Just one grain directory entry refers to a not yet allocated
                             * grain table or both grain directory copies refer to the same
                             * grain table. Not allowed. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                            break;
                        }

                        i++;
                        pGDTmp++;
                        pRGDTmp++;

                        /*
                         * Read a few tables at once if adjacent to decrease the number
                         * of I/O requests. Read at maximum 1MB at once.
                         */
                        while (   i < pExtent->cGDEntries
                               && cbGTRead < cbGTBuffersMax)
                        {
                            /* If no grain table is allocated skip the entry. */
                            /* NOTE(review): same index/pointer desynchronization
                             * as in the outer loop - verify. */
                            if (*pGDTmp == 0 && *pRGDTmp == 0)
                            {
                                i++;
                                continue;
                            }

                            if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                            {
                                /* Just one grain directory entry refers to a not yet allocated
                                 * grain table or both grain directory copies refer to the same
                                 * grain table. Not allowed. */
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                                break;
                            }

                            /* Check that the start offsets are adjacent.*/
                            if (   VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
                                || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
                                break;

                            i++;
                            pGDTmp++;
                            pRGDTmp++;
                            cbGTRead += cbGT;
                        }

                        /* Increase buffers if required. */
                        if (   RT_SUCCESS(rc)
                            && cbGTBuffers < cbGTRead)
                        {
                            uint32_t *pTmp;
                            pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
                            if (pTmp)
                            {
                                pTmpGT1 = pTmp;
                                pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
                                if (pTmp)
                                    pTmpGT2 = pTmp;
                                else
                                    rc = VERR_NO_MEMORY;
                            }
                            else
                                rc = VERR_NO_MEMORY;

                            if (rc == VERR_NO_MEMORY)
                            {
                                /* Reset to the old values. */
                                rc = VINF_SUCCESS;
                                i -= cbGTRead / cbGT;
                                cbGTRead = cbGT;

                                /* Don't try to increase the buffer again in the next run. */
                                cbGTBuffersMax = cbGTBuffers;
                            }
                        }

                        if (RT_SUCCESS(rc))
                        {
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uGTStart),
                                                       pTmpGT1, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uRGTStart),
                                                       pTmpGT2, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* Primary and backup copies must be bitwise identical. */
                            if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
                            {
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                        }
                    } /* while (i < pExtent->cGDEntries) */

                    /** @todo figure out what to do for unclean VMDKs. */
                    if (pTmpGT1)
                        RTMemFree(pTmpGT1);
                    if (pTmpGT2)
                        RTMemFree(pTmpGT2);
                }
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
            }
        }
        else
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                           N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
    }

    if (RT_FAILURE(rc))
        vmdkFreeGrainDirectory(pExtent);
    return rc;
}
1257
1258/**
1259 * Creates a new grain directory for the given extent at the given start sector.
1260 *
1261 * @returns VBox status code.
1262 * @param pImage Image instance data.
1263 * @param pExtent The VMDK extent.
1264 * @param uStartSector Where the grain directory should be stored in the image.
1265 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1266 */
1267static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1268 uint64_t uStartSector, bool fPreAlloc)
1269{
1270 int rc = VINF_SUCCESS;
1271 unsigned i;
1272 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1273 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1274 size_t cbGTRounded;
1275 uint64_t cbOverhead;
1276
1277 if (fPreAlloc)
1278 {
1279 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1280 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1281 }
1282 else
1283 {
1284 /* Use a dummy start sector for layout computation. */
1285 if (uStartSector == VMDK_GD_AT_END)
1286 uStartSector = 1;
1287 cbGTRounded = 0;
1288 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1289 }
1290
1291 /* For streamOptimized extents there is only one grain directory,
1292 * and for all others take redundant grain directory into account. */
1293 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1294 {
1295 cbOverhead = RT_ALIGN_64(cbOverhead,
1296 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1297 }
1298 else
1299 {
1300 cbOverhead += cbGDRounded + cbGTRounded;
1301 cbOverhead = RT_ALIGN_64(cbOverhead,
1302 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1303 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1304 }
1305
1306 if (RT_SUCCESS(rc))
1307 {
1308 pExtent->uAppendPosition = cbOverhead;
1309 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1310
1311 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1312 {
1313 pExtent->uSectorRGD = 0;
1314 pExtent->uSectorGD = uStartSector;
1315 }
1316 else
1317 {
1318 pExtent->uSectorRGD = uStartSector;
1319 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1320 }
1321
1322 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1323 if (RT_SUCCESS(rc))
1324 {
1325 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1326 if ( RT_SUCCESS(rc)
1327 && fPreAlloc)
1328 {
1329 uint32_t uGTSectorLE;
1330 uint64_t uOffsetSectors;
1331
1332 if (pExtent->pRGD)
1333 {
1334 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1335 for (i = 0; i < pExtent->cGDEntries; i++)
1336 {
1337 pExtent->pRGD[i] = uOffsetSectors;
1338 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1339 /* Write the redundant grain directory entry to disk. */
1340 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1341 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1342 &uGTSectorLE, sizeof(uGTSectorLE));
1343 if (RT_FAILURE(rc))
1344 {
1345 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1346 break;
1347 }
1348 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1349 }
1350 }
1351
1352 if (RT_SUCCESS(rc))
1353 {
1354 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1355 for (i = 0; i < pExtent->cGDEntries; i++)
1356 {
1357 pExtent->pGD[i] = uOffsetSectors;
1358 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1359 /* Write the grain directory entry to disk. */
1360 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1361 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1362 &uGTSectorLE, sizeof(uGTSectorLE));
1363 if (RT_FAILURE(rc))
1364 {
1365 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1366 break;
1367 }
1368 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1369 }
1370 }
1371 }
1372 }
1373 }
1374
1375 if (RT_FAILURE(rc))
1376 vmdkFreeGrainDirectory(pExtent);
1377 return rc;
1378}
1379
1380/**
1381 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1382 * free.
1383 */
1384static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1385 char **ppszUnquoted, char **ppszNext)
1386{
1387 const char *pszStart = pszStr;
1388 char *pszQ;
1389 char *pszUnquoted;
1390
1391 /* Skip over whitespace. */
1392 while (*pszStr == ' ' || *pszStr == '\t')
1393 pszStr++;
1394
1395 if (*pszStr != '"')
1396 {
1397 pszQ = (char *)pszStr;
1398 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1399 pszQ++;
1400 }
1401 else
1402 {
1403 pszStr++;
1404 pszQ = (char *)strchr(pszStr, '"');
1405 if (pszQ == NULL)
1406 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1407 pImage->pszFilename, pszStart);
1408 }
1409
1410 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1411 if (!pszUnquoted)
1412 return VERR_NO_MEMORY;
1413 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1414 pszUnquoted[pszQ - pszStr] = '\0';
1415 *ppszUnquoted = pszUnquoted;
1416 if (ppszNext)
1417 *ppszNext = pszQ + 1;
1418 return VINF_SUCCESS;
1419}
1420
1421static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1422 const char *pszLine)
1423{
1424 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1425 ssize_t cbDiff = strlen(pszLine) + 1;
1426
1427 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1428 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1429 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1430
1431 memcpy(pEnd, pszLine, cbDiff);
1432 pDescriptor->cLines++;
1433 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1434 pDescriptor->fDirty = true;
1435
1436 return VINF_SUCCESS;
1437}
1438
1439static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1440 const char *pszKey, const char **ppszValue)
1441{
1442 size_t cbKey = strlen(pszKey);
1443 const char *pszValue;
1444
1445 while (uStart != 0)
1446 {
1447 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1448 {
1449 /* Key matches, check for a '=' (preceded by whitespace). */
1450 pszValue = pDescriptor->aLines[uStart] + cbKey;
1451 while (*pszValue == ' ' || *pszValue == '\t')
1452 pszValue++;
1453 if (*pszValue == '=')
1454 {
1455 *ppszValue = pszValue + 1;
1456 break;
1457 }
1458 }
1459 uStart = pDescriptor->aNextLines[uStart];
1460 }
1461 return !!uStart;
1462}
1463
/**
 * Internal: set, replace or (with pszValue == NULL) remove a key=value pair
 * in the descriptor section starting at line uStart.
 *
 * @returns VBox status code.
 * @param   pImage        The image instance (for error reporting).
 * @param   pDescriptor   The descriptor to modify.
 * @param   uStart        First line of the section to search.
 * @param   pszKey        The key to set.
 * @param   pszValue      The new value, or NULL to delete the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Search the section for the key; remember the last line so a new key
     * can be appended at the end of the section. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 * bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (   pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail of the text buffer and splice the new value in;
             * all following line pointers move by the size difference. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: close the gap in the text
             * buffer and shift the bookkeeping arrays down by one line. */
            /* NOTE(review): unlike vmdkDescExtRemoveDummy, the line pointers
             * are copied without subtracting the length of the removed line,
             * so they appear to point past the shifted text - verify. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (  pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the bookkeeping arrays to make room after the section's last line. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Open a gap in the text buffer and write "key=value" into it. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1579
1580static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1581 uint32_t *puValue)
1582{
1583 const char *pszValue;
1584
1585 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1586 &pszValue))
1587 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1588 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1589}
1590
1591/**
1592 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1593 * free.
1594 */
1595static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1596 const char *pszKey, char **ppszValue)
1597{
1598 const char *pszValue;
1599 char *pszValueUnquoted;
1600
1601 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1602 &pszValue))
1603 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1604 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1605 if (RT_FAILURE(rc))
1606 return rc;
1607 *ppszValue = pszValueUnquoted;
1608 return rc;
1609}
1610
1611static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1612 const char *pszKey, const char *pszValue)
1613{
1614 char *pszValueQuoted;
1615
1616 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1617 if (!pszValueQuoted)
1618 return VERR_NO_STR_MEMORY;
1619 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1620 pszValueQuoted);
1621 RTStrFree(pszValueQuoted);
1622 return rc;
1623}
1624
1625static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1626 PVMDKDESCRIPTOR pDescriptor)
1627{
1628 RT_NOREF1(pImage);
1629 unsigned uEntry = pDescriptor->uFirstExtent;
1630 ssize_t cbDiff;
1631
1632 if (!uEntry)
1633 return;
1634
1635 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1636 /* Move everything including \0 in the entry marking the end of buffer. */
1637 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1638 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1639 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1640 {
1641 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1642 if (pDescriptor->aNextLines[i])
1643 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1644 else
1645 pDescriptor->aNextLines[i - 1] = 0;
1646 }
1647 pDescriptor->cLines--;
1648 if (pDescriptor->uFirstDDB)
1649 pDescriptor->uFirstDDB--;
1650
1651 return;
1652}
1653
/**
 * Internal: append a new extent line to the extent description section of
 * the descriptor, formatted according to the extent type.
 *
 * @returns VBox status code.
 * @param   pImage           Image instance data (for error reporting).
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type.
 * @param   pszBasename      Extent file name (not used for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new extent line: ZERO extents carry no file name, FLAT
     * extents additionally carry the offset within their file. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (  pDescriptor->aLines[pDescriptor->cLines]
            - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Shift the bookkeeping arrays to make room after the last extent line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Open a gap in the text buffer and copy the new line into it; all
     * following line pointers move by the inserted length. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1728
1729/**
1730 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1731 * free.
1732 */
1733static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1734 const char *pszKey, char **ppszValue)
1735{
1736 const char *pszValue;
1737 char *pszValueUnquoted;
1738
1739 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1740 &pszValue))
1741 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1742 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1743 if (RT_FAILURE(rc))
1744 return rc;
1745 *ppszValue = pszValueUnquoted;
1746 return rc;
1747}
1748
1749static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1750 const char *pszKey, uint32_t *puValue)
1751{
1752 const char *pszValue;
1753 char *pszValueUnquoted;
1754
1755 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1756 &pszValue))
1757 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1758 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1759 if (RT_FAILURE(rc))
1760 return rc;
1761 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1762 RTMemTmpFree(pszValueUnquoted);
1763 return rc;
1764}
1765
1766static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1767 const char *pszKey, PRTUUID pUuid)
1768{
1769 const char *pszValue;
1770 char *pszValueUnquoted;
1771
1772 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1773 &pszValue))
1774 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1775 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1776 if (RT_FAILURE(rc))
1777 return rc;
1778 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1779 RTMemTmpFree(pszValueUnquoted);
1780 return rc;
1781}
1782
1783static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1784 const char *pszKey, const char *pszVal)
1785{
1786 int rc;
1787 char *pszValQuoted;
1788
1789 if (pszVal)
1790 {
1791 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1792 if (!pszValQuoted)
1793 return VERR_NO_STR_MEMORY;
1794 }
1795 else
1796 pszValQuoted = NULL;
1797 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1798 pszValQuoted);
1799 if (pszValQuoted)
1800 RTStrFree(pszValQuoted);
1801 return rc;
1802}
1803
1804static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1805 const char *pszKey, PCRTUUID pUuid)
1806{
1807 char *pszUuid;
1808
1809 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1810 if (!pszUuid)
1811 return VERR_NO_STR_MEMORY;
1812 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1813 pszUuid);
1814 RTStrFree(pszUuid);
1815 return rc;
1816}
1817
1818static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1819 const char *pszKey, uint32_t uValue)
1820{
1821 char *pszValue;
1822
1823 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1824 if (!pszValue)
1825 return VERR_NO_STR_MEMORY;
1826 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1827 pszValue);
1828 RTStrFree(pszValue);
1829 return rc;
1830}
1831
/**
 * Splits the descriptor data into individual lines checking for correct line
 * endings and descriptor size.
 *
 * Each line is terminated in place (CR/LF replaced with NUL) and a pointer
 * to its start is recorded in pDesc->aLines.
 *
 * @returns VBox status code.
 * @param   pImage    The image instance.
 * @param   pDesc     The descriptor.
 * @param   pszTmp    The raw descriptor data from the image.
 */
static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
{
    unsigned cLine = 0;
    int rc = VINF_SUCCESS;

    while (   RT_SUCCESS(rc)
           && *pszTmp != '\0')
    {
        /* Record the start of the current line. */
        pDesc->aLines[cLine++] = pszTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            rc = VERR_VD_VMDK_INVALID_HEADER;
            break;
        }

        /* Scan to the end of the line; only CRLF and plain LF are accepted. */
        while (*pszTmp != '\0' && *pszTmp != '\n')
        {
            if (*pszTmp == '\r')
            {
                /* A CR not followed by LF is an unsupported line ending. */
                if (*(pszTmp + 1) != '\n')
                {
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    break;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pszTmp = '\0';
                }
            }
            pszTmp++;
        }

        if (RT_FAILURE(rc))
            break;

        /* Get rid of LF character. */
        if (*pszTmp == '\n')
        {
            *pszTmp = '\0';
            pszTmp++;
        }
    }

    if (RT_SUCCESS(rc))
    {
        pDesc->cLines = cLine;
        /* Pointer right after the end of the used part of the buffer. */
        pDesc->aLines[cLine] = pszTmp;
    }

    return rc;
}
1895
/**
 * Internal: split the raw descriptor into lines and locate the three
 * descriptor sections (header, extent description, disk database),
 * verifying they appear in the required order.
 *
 * @returns VBox status code.
 * @param   pImage        The image instance.
 * @param   pDescData     Raw descriptor data, modified in place.
 * @param   cbDescData    Size of the descriptor buffer.
 * @param   pDescriptor   The descriptor structure to fill.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The first line must be the well-known signature comment; both
         * spellings occur in existing images. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;

            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Comments and empty lines are not linked into any section. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            /* Start a new per-section chain of lines. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Link this line into the current section's line chain. */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }

    return rc;
}
1979
1980static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1981 PCVDGEOMETRY pPCHSGeometry)
1982{
1983 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1984 VMDK_DDB_GEO_PCHS_CYLINDERS,
1985 pPCHSGeometry->cCylinders);
1986 if (RT_FAILURE(rc))
1987 return rc;
1988 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1989 VMDK_DDB_GEO_PCHS_HEADS,
1990 pPCHSGeometry->cHeads);
1991 if (RT_FAILURE(rc))
1992 return rc;
1993 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1994 VMDK_DDB_GEO_PCHS_SECTORS,
1995 pPCHSGeometry->cSectors);
1996 return rc;
1997}
1998
1999static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2000 PCVDGEOMETRY pLCHSGeometry)
2001{
2002 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2003 VMDK_DDB_GEO_LCHS_CYLINDERS,
2004 pLCHSGeometry->cCylinders);
2005 if (RT_FAILURE(rc))
2006 return rc;
2007 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2008 VMDK_DDB_GEO_LCHS_HEADS,
2009
2010 pLCHSGeometry->cHeads);
2011 if (RT_FAILURE(rc))
2012 return rc;
2013 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2014 VMDK_DDB_GEO_LCHS_SECTORS,
2015 pLCHSGeometry->cSectors);
2016 return rc;
2017}
2018
2019static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2020 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2021{
2022 pDescriptor->uFirstDesc = 0;
2023 pDescriptor->uFirstExtent = 0;
2024 pDescriptor->uFirstDDB = 0;
2025 pDescriptor->cLines = 0;
2026 pDescriptor->cbDescAlloc = cbDescData;
2027 pDescriptor->fDirty = false;
2028 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2029 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2030
2031 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2032 if (RT_SUCCESS(rc))
2033 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2034 if (RT_SUCCESS(rc))
2035 {
2036 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2037 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2038 }
2039 if (RT_SUCCESS(rc))
2040 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2041 if (RT_SUCCESS(rc))
2042 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2043 if (RT_SUCCESS(rc))
2044 {
2045 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2046 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2047 }
2048 if (RT_SUCCESS(rc))
2049 {
2050 /* The trailing space is created by VMware, too. */
2051 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2052 }
2053 if (RT_SUCCESS(rc))
2054 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2055 if (RT_SUCCESS(rc))
2056 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2057 if (RT_SUCCESS(rc))
2058 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2059 if (RT_SUCCESS(rc))
2060 {
2061 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2062
2063 /* Now that the framework is in place, use the normal functions to insert
2064 * the remaining keys. */
2065 char szBuf[9];
2066 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2067 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2068 "CID", szBuf);
2069 }
2070 if (RT_SUCCESS(rc))
2071 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2072 "parentCID", "ffffffff");
2073 if (RT_SUCCESS(rc))
2074 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2075
2076 return rc;
2077}
2078
/**
 * Internal: parse the plain-text descriptor and populate the image state:
 * image flags (from createType), the extent table, PCHS/LCHS geometry and
 * the image/modification/parent UUIDs.
 *
 * @returns VBox status code.
 * @param   pImage      Pointer to the image instance data.
 * @param   pDescData   Raw descriptor text; split into lines in place by
 *                      the preprocessing step.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    /* Split the buffer into lines and locate the three descriptor sections. */
    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. Any createType not
     * listed below (e.g. plain monolithic variants) sets no extra flags. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (   !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* NOTE(review): freed with RTMemTmpFree — presumably vmdkDescBaseGetStr
     * allocates with RTMemTmpAlloc; confirm against that helper. */
    RTMemTmpFree(pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line:
     *   <ACCESS> <size in sectors> <TYPE> ["basename" [offset]] */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject absent or implausible PCHS values (max 16 heads, 63 sectors). */
    if (   pImage->PCHSGeometry.cCylinders == 0
        || pImage->PCHSGeometry.cHeads == 0
        || pImage->PCHSGeometry.cHeads > 16
        || pImage->PCHSGeometry.cSectors == 0
        || pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* An incomplete LCHS geometry is treated as not present at all. */
    if (   pImage->LCHSGeometry.cCylinders == 0
        || pImage->LCHSGeometry.cHeads == 0
        || pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise.
         * Note: unlike the UUIDs above, a missing parent UUID is stored as
         * the nil UUID (no parent), not a freshly created one. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2400
2401/**
2402 * Internal : Prepares the descriptor to write to the image.
2403 */
2404static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2405 void **ppvData, size_t *pcbData)
2406{
2407 int rc = VINF_SUCCESS;
2408
2409 /*
2410 * Allocate temporary descriptor buffer.
2411 * In case there is no limit allocate a default
2412 * and increase if required.
2413 */
2414 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2415 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2416 size_t offDescriptor = 0;
2417
2418 if (!pszDescriptor)
2419 return VERR_NO_MEMORY;
2420
2421 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2422 {
2423 const char *psz = pImage->Descriptor.aLines[i];
2424 size_t cb = strlen(psz);
2425
2426 /*
2427 * Increase the descriptor if there is no limit and
2428 * there is not enough room left for this line.
2429 */
2430 if (offDescriptor + cb + 1 > cbDescriptor)
2431 {
2432 if (cbLimit)
2433 {
2434 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2435 break;
2436 }
2437 else
2438 {
2439 char *pszDescriptorNew = NULL;
2440 LogFlow(("Increasing descriptor cache\n"));
2441
2442 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2443 if (!pszDescriptorNew)
2444 {
2445 rc = VERR_NO_MEMORY;
2446 break;
2447 }
2448 pszDescriptor = pszDescriptorNew;
2449 cbDescriptor += cb + 4 * _1K;
2450 }
2451 }
2452
2453 if (cb > 0)
2454 {
2455 memcpy(pszDescriptor + offDescriptor, psz, cb);
2456 offDescriptor += cb;
2457 }
2458
2459 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2460 offDescriptor++;
2461 }
2462
2463 if (RT_SUCCESS(rc))
2464 {
2465 *ppvData = pszDescriptor;
2466 *pcbData = offDescriptor;
2467 }
2468 else if (pszDescriptor)
2469 RTMemFree(pszDescriptor);
2470
2471 return rc;
2472}
2473
2474/**
2475 * Internal: write/update the descriptor part of the image.
2476 */
2477static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2478{
2479 int rc = VINF_SUCCESS;
2480 uint64_t cbLimit;
2481 uint64_t uOffset;
2482 PVMDKFILE pDescFile;
2483 void *pvDescriptor = NULL;
2484 size_t cbDescriptor;
2485
2486 if (pImage->pDescData)
2487 {
2488 /* Separate descriptor file. */
2489 uOffset = 0;
2490 cbLimit = 0;
2491 pDescFile = pImage->pFile;
2492 }
2493 else
2494 {
2495 /* Embedded descriptor file. */
2496 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2497 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2498 pDescFile = pImage->pExtents[0].pFile;
2499 }
2500 /* Bail out if there is no file to write to. */
2501 if (pDescFile == NULL)
2502 return VERR_INVALID_PARAMETER;
2503
2504 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2505 if (RT_SUCCESS(rc))
2506 {
2507 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2508 uOffset, pvDescriptor,
2509 cbLimit ? cbLimit : cbDescriptor,
2510 pIoCtx, NULL, NULL);
2511 if ( RT_FAILURE(rc)
2512 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2513 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2514 }
2515
2516 if (RT_SUCCESS(rc) && !cbLimit)
2517 {
2518 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2519 if (RT_FAILURE(rc))
2520 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2521 }
2522
2523 if (RT_SUCCESS(rc))
2524 pImage->Descriptor.fDirty = false;
2525
2526 if (pvDescriptor)
2527 RTMemFree(pvDescriptor);
2528 return rc;
2529
2530}
2531
2532/**
2533 * Internal: validate the consistency check values in a binary header.
2534 */
2535static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2536{
2537 int rc = VINF_SUCCESS;
2538 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2539 {
2540 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2541 return rc;
2542 }
2543 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2544 {
2545 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2546 return rc;
2547 }
2548 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2549 && ( pHeader->singleEndLineChar != '\n'
2550 || pHeader->nonEndLineChar != ' '
2551 || pHeader->doubleEndLineChar1 != '\r'
2552 || pHeader->doubleEndLineChar2 != '\n') )
2553 {
2554 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2555 return rc;
2556 }
2557 return rc;
2558}
2559
2560/**
2561 * Internal: read metadata belonging to an extent with binary header, i.e.
2562 * as found in monolithic files.
2563 */
2564static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2565 bool fMagicAlreadyRead)
2566{
2567 SparseExtentHeader Header;
2568 int rc;
2569
2570 if (!fMagicAlreadyRead)
2571 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2572 &Header, sizeof(Header));
2573 else
2574 {
2575 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2576 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2577 RT_OFFSETOF(SparseExtentHeader, version),
2578 &Header.version,
2579 sizeof(Header)
2580 - RT_OFFSETOF(SparseExtentHeader, version));
2581 }
2582
2583 if (RT_SUCCESS(rc))
2584 {
2585 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2586 if (RT_SUCCESS(rc))
2587 {
2588 uint64_t cbFile = 0;
2589
2590 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2591 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2592 pExtent->fFooter = true;
2593
2594 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2595 || ( pExtent->fFooter
2596 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2597 {
2598 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2599 if (RT_FAILURE(rc))
2600 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2601 }
2602
2603 if (RT_SUCCESS(rc))
2604 {
2605 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2606 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2607
2608 if ( pExtent->fFooter
2609 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2610 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2611 {
2612 /* Read the footer, which comes before the end-of-stream marker. */
2613 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2614 cbFile - 2*512, &Header,
2615 sizeof(Header));
2616 if (RT_FAILURE(rc))
2617 {
2618 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2619 rc = VERR_VD_VMDK_INVALID_HEADER;
2620 }
2621
2622 if (RT_SUCCESS(rc))
2623 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2624 /* Prohibit any writes to this extent. */
2625 pExtent->uAppendPosition = 0;
2626 }
2627
2628 if (RT_SUCCESS(rc))
2629 {
2630 pExtent->uVersion = RT_LE2H_U32(Header.version);
2631 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2632 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2633 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2634 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2635 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2636 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2637 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2638 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2639 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2640 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2641 {
2642 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2643 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2644 }
2645 else
2646 {
2647 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2648 pExtent->uSectorRGD = 0;
2649 }
2650
2651 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2652 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2653 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2654
2655 if ( RT_SUCCESS(rc)
2656 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2657 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2658 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2659 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2660 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2661 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2662
2663 if (RT_SUCCESS(rc))
2664 {
2665 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2666 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2667 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2668 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2669 else
2670 {
2671 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2672 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2673
2674 /* Fix up the number of descriptor sectors, as some flat images have
2675 * really just one, and this causes failures when inserting the UUID
2676 * values and other extra information. */
2677 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2678 {
2679 /* Do it the easy way - just fix it for flat images which have no
2680 * other complicated metadata which needs space too. */
2681 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2682 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2683 pExtent->cDescriptorSectors = 4;
2684 }
2685 }
2686 }
2687 }
2688 }
2689 }
2690 }
2691 else
2692 {
2693 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2694 rc = VERR_VD_VMDK_INVALID_HEADER;
2695 }
2696
2697 if (RT_FAILURE(rc))
2698 vmdkFreeExtentData(pImage, pExtent, false);
2699
2700 return rc;
2701}
2702
2703/**
2704 * Internal: read additional metadata belonging to an extent. For those
2705 * extents which have no additional metadata just verify the information.
2706 */
2707static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2708{
2709 int rc = VINF_SUCCESS;
2710
2711/* disabled the check as there are too many truncated vmdk images out there */
2712#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2713 uint64_t cbExtentSize;
2714 /* The image must be a multiple of a sector in size and contain the data
2715 * area (flat images only). If not, it means the image is at least
2716 * truncated, or even seriously garbled. */
2717 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2718 if (RT_FAILURE(rc))
2719 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2720 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2721 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2722 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2723 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2724#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2725 if ( RT_SUCCESS(rc)
2726 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2727 {
2728 /* The spec says that this must be a power of two and greater than 8,
2729 * but probably they meant not less than 8. */
2730 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2731 || pExtent->cSectorsPerGrain < 8)
2732 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2733 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2734 else
2735 {
2736 /* This code requires that a grain table must hold a power of two multiple
2737 * of the number of entries per GT cache entry. */
2738 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2739 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2740 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2741 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2742 else
2743 {
2744 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2745 if (RT_SUCCESS(rc))
2746 {
2747 /* Prohibit any writes to this streamOptimized extent. */
2748 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2749 pExtent->uAppendPosition = 0;
2750
2751 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2752 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2753 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2754 rc = vmdkReadGrainDirectory(pImage, pExtent);
2755 else
2756 {
2757 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2758 pExtent->cbGrainStreamRead = 0;
2759 }
2760 }
2761 }
2762 }
2763 }
2764
2765 if (RT_FAILURE(rc))
2766 vmdkFreeExtentData(pImage, pExtent, false);
2767
2768 return rc;
2769}
2770
2771/**
2772 * Internal: write/update the metadata for a sparse extent.
2773 */
2774static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2775 uint64_t uOffset, PVDIOCTX pIoCtx)
2776{
2777 SparseExtentHeader Header;
2778
2779 memset(&Header, '\0', sizeof(Header));
2780 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2781 Header.version = RT_H2LE_U32(pExtent->uVersion);
2782 Header.flags = RT_H2LE_U32(RT_BIT(0));
2783 if (pExtent->pRGD)
2784 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2785 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2786 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2787 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2788 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2789 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2790 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2791 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2792 if (pExtent->fFooter && uOffset == 0)
2793 {
2794 if (pExtent->pRGD)
2795 {
2796 Assert(pExtent->uSectorRGD);
2797 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2798 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2799 }
2800 else
2801 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2802 }
2803 else
2804 {
2805 if (pExtent->pRGD)
2806 {
2807 Assert(pExtent->uSectorRGD);
2808 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2809 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2810 }
2811 else
2812 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2813 }
2814 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2815 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2816 Header.singleEndLineChar = '\n';
2817 Header.nonEndLineChar = ' ';
2818 Header.doubleEndLineChar1 = '\r';
2819 Header.doubleEndLineChar2 = '\n';
2820 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2821
2822 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2823 uOffset, &Header, sizeof(Header),
2824 pIoCtx, NULL, NULL);
2825 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2826 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2827 return rc;
2828}
2829
2830/**
2831 * Internal: free the buffers used for streamOptimized images.
2832 */
2833static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2834{
2835 if (pExtent->pvCompGrain)
2836 {
2837 RTMemFree(pExtent->pvCompGrain);
2838 pExtent->pvCompGrain = NULL;
2839 }
2840 if (pExtent->pvGrain)
2841 {
2842 RTMemFree(pExtent->pvGrain);
2843 pExtent->pvGrain = NULL;
2844 }
2845}
2846
2847/**
2848 * Internal: free the memory used by the extent data structure, optionally
2849 * deleting the referenced files.
2850 *
2851 * @returns VBox status code.
2852 * @param pImage Pointer to the image instance data.
2853 * @param pExtent The extent to free.
2854 * @param fDelete Flag whether to delete the backing storage.
2855 */
2856static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2857 bool fDelete)
2858{
2859 int rc = VINF_SUCCESS;
2860
2861 vmdkFreeGrainDirectory(pExtent);
2862 if (pExtent->pDescData)
2863 {
2864 RTMemFree(pExtent->pDescData);
2865 pExtent->pDescData = NULL;
2866 }
2867 if (pExtent->pFile != NULL)
2868 {
2869 /* Do not delete raw extents, these have full and base names equal. */
2870 rc = vmdkFileClose(pImage, &pExtent->pFile,
2871 fDelete
2872 && pExtent->pszFullname
2873 && pExtent->pszBasename
2874 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2875 }
2876 if (pExtent->pszBasename)
2877 {
2878 RTMemTmpFree((void *)pExtent->pszBasename);
2879 pExtent->pszBasename = NULL;
2880 }
2881 if (pExtent->pszFullname)
2882 {
2883 RTStrFree((char *)(void *)pExtent->pszFullname);
2884 pExtent->pszFullname = NULL;
2885 }
2886 vmdkFreeStreamBuffers(pExtent);
2887
2888 return rc;
2889}
2890
2891/**
2892 * Internal: allocate grain table cache if necessary for this image.
2893 */
2894static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2895{
2896 PVMDKEXTENT pExtent;
2897
2898 /* Allocate grain table cache if any sparse extent is present. */
2899 for (unsigned i = 0; i < pImage->cExtents; i++)
2900 {
2901 pExtent = &pImage->pExtents[i];
2902 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2903 {
2904 /* Allocate grain table cache. */
2905 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2906 if (!pImage->pGTCache)
2907 return VERR_NO_MEMORY;
2908 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2909 {
2910 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2911 pGCE->uExtent = UINT32_MAX;
2912 }
2913 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2914 break;
2915 }
2916 }
2917
2918 return VINF_SUCCESS;
2919}
2920
2921/**
2922 * Internal: allocate the given number of extents.
2923 */
2924static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2925{
2926 int rc = VINF_SUCCESS;
2927 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2928 if (pExtents)
2929 {
2930 for (unsigned i = 0; i < cExtents; i++)
2931 {
2932 pExtents[i].pFile = NULL;
2933 pExtents[i].pszBasename = NULL;
2934 pExtents[i].pszFullname = NULL;
2935 pExtents[i].pGD = NULL;
2936 pExtents[i].pRGD = NULL;
2937 pExtents[i].pDescData = NULL;
2938 pExtents[i].uVersion = 1;
2939 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2940 pExtents[i].uExtent = i;
2941 pExtents[i].pImage = pImage;
2942 }
2943 pImage->pExtents = pExtents;
2944 pImage->cExtents = cExtents;
2945 }
2946 else
2947 rc = VERR_NO_MEMORY;
2948
2949 return rc;
2950}
2951
/**
 * Reads and processes the descriptor embedded in sparse images.
 *
 * On success the passed file handle is handed over to the single extent
 * and pImage->pFile is cleared.
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pFile The sparse file handle.
 */
static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* It's a hosted single-extent image. */
    int rc = vmdkCreateExtents(pImage, 1);
    if (RT_SUCCESS(rc))
    {
        /* The opened file is passed to the extent. No separate descriptor
         * file, so no need to keep anything open for the image. */
        PVMDKEXTENT pExtent = &pImage->pExtents[0];
        pExtent->pFile = pFile;
        pImage->pFile = NULL;
        pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
        if (RT_LIKELY(pExtent->pszFullname))
        {
            /* As we're dealing with a monolithic image here, there must
             * be a descriptor embedded in the image file. */
            rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
            if (   RT_SUCCESS(rc)
                && pExtent->uDescriptorSector
                && pExtent->cDescriptorSectors)
            {
                /* HACK: extend the descriptor if it is unusually small and it fits in
                 * the unused space after the image header. Allows opening VMDK files
                 * with extremely small descriptor in read/write mode.
                 *
                 * The previous version introduced a possible regression for VMDK stream
                 * optimized images from VMware which tend to have only a single sector sized
                 * descriptor. Increasing the descriptor size resulted in adding the various uuid
                 * entries required to make it work with VBox but for stream optimized images
                 * the updated binary header wasn't written to the disk creating a mismatch
                 * between advertised and real descriptor size.
                 *
                 * The descriptor size will be increased even if opened readonly now if there
                 * is enough room, but the new value will not be written back to the image. */
                if (   pExtent->cDescriptorSectors < 3
                    && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
                    && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
                {
                    uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;

                    pExtent->cDescriptorSectors = 4;
                    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    {
                        /*
                         * Update the on disk number now to make sure we don't introduce inconsistencies
                         * in case of stream optimized images from VMware where the descriptor is just
                         * one sector big (the binary header is not written to disk for complete
                         * stream optimized images in vmdkFlushImage()).
                         */
                        uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
                        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage, RT_OFFSETOF(SparseExtentHeader, descriptorSize),
                                                    &u64DescSizeNew, sizeof(u64DescSizeNew));
                        if (RT_FAILURE(rc))
                        {
                            LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
                            /* Restore the old size and carry on. */
                            pExtent->cDescriptorSectors = cDescriptorSectorsOld;
                        }
                    }
                }
                /* Read the descriptor from the extent. */
                pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                if (RT_LIKELY(pExtent->pDescData))
                {
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
                                               pExtent->pDescData,
                                               VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
                                                 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                        if (   RT_SUCCESS(rc)
                            && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
                        {
                            rc = vmdkReadMetaExtent(pImage, pExtent);
                            if (RT_SUCCESS(rc))
                            {
                                /* Mark the extent as unclean if opened in read-write mode. */
                                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                            }
                        }
                        else if (RT_SUCCESS(rc))
                            /* Parsing succeeded but the image is streamOptimized and was
                             * opened for async I/O — that combination is rejected here. */
                            rc = VERR_NOT_SUPPORTED;
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (RT_SUCCESS(rc))
                /* Binary header read fine but advertises no embedded descriptor. */
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
3066
/**
 * Reads the descriptor from a pure text file.
 *
 * Parses the descriptor, resolves each extent's full path name and opens
 * the extent files (sparse metadata is read as well).
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pFile The descriptor file handle.
 */
static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* Allocate at least 10K, and make sure that there is 5K free space
     * in case new entries need to be added to the descriptor. Never
     * allocate more than 128K, because that's no valid descriptor file
     * and will result in the correct "truncated read" error handling. */
    uint64_t cbFileSize;
    int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
    if (   RT_SUCCESS(rc)
        && cbFileSize >= 50) /* anything shorter is rejected as too short below */
    {
        uint64_t cbSize = cbFileSize;
        /* Round up to a multiple of 10 sectors with at least 10 sectors slack. */
        if (cbSize % VMDK_SECTOR2BYTE(10))
            cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
        else
            cbSize += VMDK_SECTOR2BYTE(10);
        cbSize = RT_MIN(cbSize, _128K);
        pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
                                       RT_MIN(pImage->cbDescAlloc, cbFileSize));
            if (RT_SUCCESS(rc))
            {
#if 0 /** @todo Revisit */
                cbRead += sizeof(u32Magic);
                if (cbRead == pImage->cbDescAlloc)
                {
                    /* Likely the read is truncated. Better fail a bit too early
                     * (normally the descriptor is much smaller than our buffer). */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
#endif
                rc = vmdkParseDescriptor(pImage, pImage->pDescData,
                                         pImage->cbDescAlloc);
                if (RT_SUCCESS(rc))
                {
                    /* Resolve and open each extent listed in the descriptor.
                     * A failure breaks out of the loop with rc set. */
                    for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
                    {
                        PVMDKEXTENT pExtent = &pImage->pExtents[i];
                        if (pExtent->pszBasename)
                        {
                            /* Hack to figure out whether the specified name in the
                             * extent descriptor is absolute. Doesn't always work, but
                             * should be good enough for now. */
                            char *pszFullname;
                            /** @todo implement proper path absolute check. */
                            if (pExtent->pszBasename[0] == RTPATH_SLASH)
                            {
                                pszFullname = RTStrDup(pExtent->pszBasename);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                            }
                            else
                            {
                                /* Relative extent name: resolve against the
                                 * directory of the descriptor file. */
                                char *pszDirname = RTStrDup(pImage->pszFilename);
                                if (!pszDirname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                                RTPathStripFilename(pszDirname);
                                pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
                                RTStrFree(pszDirname);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_STR_MEMORY;
                                    break;
                                }
                            }
                            pExtent->pszFullname = pszFullname;
                        }
                        else
                            pExtent->pszFullname = NULL;

                        /* Respect per-extent read-only access from the descriptor. */
                        unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
                        switch (pExtent->enmType)
                        {
                            case VMDKETYPE_HOSTED_SPARSE:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
                                                              false /* fMagicAlreadyRead */);
                                if (RT_FAILURE(rc))
                                    break;
                                rc = vmdkReadMetaExtent(pImage, pExtent);
                                if (RT_FAILURE(rc))
                                    break;

                                /* Mark extent as unclean if opened in read-write mode. */
                                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                                break;
                            case VMDKETYPE_VMFS:
                            case VMDKETYPE_FLAT:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                break;
                            case VMDKETYPE_ZERO:
                                /* Nothing to do. */
                                break;
                            default:
                                /* NOTE(review): rc is left unchanged here, so an unknown
                                 * extent type only triggers the assertion in strict builds. */
                                AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
                        }
                    }
                }
            }
            else
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else if (RT_SUCCESS(rc))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);

    return rc;
}
3214
3215/**
3216 * Read and process the descriptor based on the image type.
3217 *
3218 * @returns VBox status code.
3219 * @param pImage VMDK image instance.
3220 * @param pFile VMDK file handle.
3221 */
3222static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3223{
3224 uint32_t u32Magic;
3225
3226 /* Read magic (if present). */
3227 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3228 &u32Magic, sizeof(u32Magic));
3229 if (RT_SUCCESS(rc))
3230 {
3231 /* Handle the file according to its magic number. */
3232 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3233 rc = vmdkDescriptorReadSparse(pImage, pFile);
3234 else
3235 rc = vmdkDescriptorReadAscii(pImage, pFile);
3236 }
3237 else
3238 {
3239 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3240 rc = VERR_VD_VMDK_INVALID_HEADER;
3241 }
3242
3243 return rc;
3244}
3245
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * @returns VBox status code.
 * @param pImage Pointer to the image instance data; the error/IO
 *               interfaces and uOpenFlags are (re)initialized here.
 * @param uOpenFlags VD_OPEN_FLAGS_* mask controlling how the image is opened.
 */
static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
{
    pImage->uOpenFlags = uOpenFlags;
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    /*
     * Open the image.
     * We don't have to check for asynchronous access because
     * we only support raw access and the opened file is a description
     * file where no data is stored.
     */
    PVMDKFILE pFile;
    int rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
                          VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
    if (RT_SUCCESS(rc))
    {
        pImage->pFile = pFile;

        /* Parse either the embedded (sparse) or the standalone text descriptor. */
        rc = vmdkDescriptorRead(pImage, pFile);
        if (RT_SUCCESS(rc))
        {
            /* Determine PCHS geometry if not set. */
            if (pImage->PCHSGeometry.cCylinders == 0)
            {
                /* NOTE(review): assumes cHeads and cSectors are non-zero at this
                 * point (presumably defaulted by the descriptor parsing) — verify,
                 * otherwise this divides by zero. */
                uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
                                    / pImage->PCHSGeometry.cHeads
                                    / pImage->PCHSGeometry.cSectors;
                pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                {
                    rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
                    AssertRC(rc);
                }
            }

            /* Update the image metadata now in case it has changed. */
            rc = vmdkFlushImage(pImage, NULL);
            if (RT_SUCCESS(rc))
            {
                /* Figure out a few per-image constants from the extents. */
                pImage->cbSize = 0;
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    PVMDKEXTENT pExtent = &pImage->pExtents[i];
                    if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
                    {
                        /* Here used to be a check whether the nominal size of an extent
                         * is a multiple of the grain size. The spec says that this is
                         * always the case, but unfortunately some files out there in the
                         * wild violate the spec (e.g. ReactOS 0.3.1). */
                    }
                    else if (   pExtent->enmType == VMDKETYPE_FLAT
                             || pExtent->enmType == VMDKETYPE_ZERO)
                        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;

                    pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
                }

                /* The grain table cache is skipped only for streamOptimized
                 * images opened readonly + sequential. */
                if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                    rc = vmdkAllocateGrainTableCache(pImage);
            }
        }
    }
    /* else: Do NOT signal an appropriate error here, as the VD layer has the
     * choice of retrying the open if it failed. */

    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, false);
    return rc;
}
3324
3325/**
3326 * Internal: create VMDK images for raw disk/partition access.
3327 */
3328static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3329 uint64_t cbSize)
3330{
3331 int rc = VINF_SUCCESS;
3332 PVMDKEXTENT pExtent;
3333
3334 if (pRaw->uFlags & VBOXHDDRAW_DISK)
3335 {
3336 /* Full raw disk access. This requires setting up a descriptor
3337 * file and open the (flat) raw disk. */
3338 rc = vmdkCreateExtents(pImage, 1);
3339 if (RT_FAILURE(rc))
3340 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3341 pExtent = &pImage->pExtents[0];
3342 /* Create raw disk descriptor file. */
3343 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3344 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3345 true /* fCreate */));
3346 if (RT_FAILURE(rc))
3347 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3348
3349 /* Set up basename for extent description. Cannot use StrDup. */
3350 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3351 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3352 if (!pszBasename)
3353 return VERR_NO_MEMORY;
3354 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3355 pExtent->pszBasename = pszBasename;
3356 /* For raw disks the full name is identical to the base name. */
3357 pExtent->pszFullname = RTStrDup(pszBasename);
3358 if (!pExtent->pszFullname)
3359 return VERR_NO_MEMORY;
3360 pExtent->enmType = VMDKETYPE_FLAT;
3361 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3362 pExtent->uSectorOffset = 0;
3363 pExtent->enmAccess = (pRaw->uFlags & VBOXHDDRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
3364 pExtent->fMetaDirty = false;
3365
3366 /* Open flat image, the raw disk. */
3367 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3368 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3369 false /* fCreate */));
3370 if (RT_FAILURE(rc))
3371 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3372 }
3373 else
3374 {
3375 /* Raw partition access. This requires setting up a descriptor
3376 * file, write the partition information to a flat extent and
3377 * open all the (flat) raw disk partitions. */
3378
3379 /* First pass over the partition data areas to determine how many
3380 * extents we need. One data area can require up to 2 extents, as
3381 * it might be necessary to skip over unpartitioned space. */
3382 unsigned cExtents = 0;
3383 uint64_t uStart = 0;
3384 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3385 {
3386 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3387 if (uStart > pPart->uStart)
3388 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3389 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3390
3391 if (uStart < pPart->uStart)
3392 cExtents++;
3393 uStart = pPart->uStart + pPart->cbData;
3394 cExtents++;
3395 }
3396 /* Another extent for filling up the rest of the image. */
3397 if (uStart != cbSize)
3398 cExtents++;
3399
3400 rc = vmdkCreateExtents(pImage, cExtents);
3401 if (RT_FAILURE(rc))
3402 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3403
3404 /* Create raw partition descriptor file. */
3405 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3406 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3407 true /* fCreate */));
3408 if (RT_FAILURE(rc))
3409 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3410
3411 /* Create base filename for the partition table extent. */
3412 /** @todo remove fixed buffer without creating memory leaks. */
3413 char pszPartition[1024];
3414 const char *pszBase = RTPathFilename(pImage->pszFilename);
3415 const char *pszSuff = RTPathSuffix(pszBase);
3416 if (pszSuff == NULL)
3417 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3418 char *pszBaseBase = RTStrDup(pszBase);
3419 if (!pszBaseBase)
3420 return VERR_NO_MEMORY;
3421 RTPathStripSuffix(pszBaseBase);
3422 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3423 pszBaseBase, pszSuff);
3424 RTStrFree(pszBaseBase);
3425
3426 /* Second pass over the partitions, now define all extents. */
3427 uint64_t uPartOffset = 0;
3428 cExtents = 0;
3429 uStart = 0;
3430 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3431 {
3432 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3433 pExtent = &pImage->pExtents[cExtents++];
3434
3435 if (uStart < pPart->uStart)
3436 {
3437 pExtent->pszBasename = NULL;
3438 pExtent->pszFullname = NULL;
3439 pExtent->enmType = VMDKETYPE_ZERO;
3440 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3441 pExtent->uSectorOffset = 0;
3442 pExtent->enmAccess = VMDKACCESS_READWRITE;
3443 pExtent->fMetaDirty = false;
3444 /* go to next extent */
3445 pExtent = &pImage->pExtents[cExtents++];
3446 }
3447 uStart = pPart->uStart + pPart->cbData;
3448
3449 if (pPart->pvPartitionData)
3450 {
3451 /* Set up basename for extent description. Can't use StrDup. */
3452 size_t cbBasename = strlen(pszPartition) + 1;
3453 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3454 if (!pszBasename)
3455 return VERR_NO_MEMORY;
3456 memcpy(pszBasename, pszPartition, cbBasename);
3457 pExtent->pszBasename = pszBasename;
3458
3459 /* Set up full name for partition extent. */
3460 char *pszDirname = RTStrDup(pImage->pszFilename);
3461 if (!pszDirname)
3462 return VERR_NO_STR_MEMORY;
3463 RTPathStripFilename(pszDirname);
3464 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3465 RTStrFree(pszDirname);
3466 if (!pszFullname)
3467 return VERR_NO_STR_MEMORY;
3468 pExtent->pszFullname = pszFullname;
3469 pExtent->enmType = VMDKETYPE_FLAT;
3470 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3471 pExtent->uSectorOffset = uPartOffset;
3472 pExtent->enmAccess = VMDKACCESS_READWRITE;
3473 pExtent->fMetaDirty = false;
3474
3475 /* Create partition table flat image. */
3476 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3477 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3478 true /* fCreate */));
3479 if (RT_FAILURE(rc))
3480 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3481 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
3482 VMDK_SECTOR2BYTE(uPartOffset),
3483 pPart->pvPartitionData,
3484 pPart->cbData);
3485 if (RT_FAILURE(rc))
3486 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3487 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3488 }
3489 else
3490 {
3491 if (pPart->pszRawDevice)
3492 {
3493 /* Set up basename for extent descr. Can't use StrDup. */
3494 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3495 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3496 if (!pszBasename)
3497 return VERR_NO_MEMORY;
3498 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3499 pExtent->pszBasename = pszBasename;
3500 /* For raw disks full name is identical to base name. */
3501 pExtent->pszFullname = RTStrDup(pszBasename);
3502 if (!pExtent->pszFullname)
3503 return VERR_NO_MEMORY;
3504 pExtent->enmType = VMDKETYPE_FLAT;
3505 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3506 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3507 pExtent->enmAccess = (pPart->uFlags & VBOXHDDRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
3508 pExtent->fMetaDirty = false;
3509
3510 /* Open flat image, the raw partition. */
3511 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3512 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3513 false /* fCreate */));
3514 if (RT_FAILURE(rc))
3515 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3516 }
3517 else
3518 {
3519 pExtent->pszBasename = NULL;
3520 pExtent->pszFullname = NULL;
3521 pExtent->enmType = VMDKETYPE_ZERO;
3522 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3523 pExtent->uSectorOffset = 0;
3524 pExtent->enmAccess = VMDKACCESS_READWRITE;
3525 pExtent->fMetaDirty = false;
3526 }
3527 }
3528 }
3529 /* Another extent for filling up the rest of the image. */
3530 if (uStart != cbSize)
3531 {
3532 pExtent = &pImage->pExtents[cExtents++];
3533 pExtent->pszBasename = NULL;
3534 pExtent->pszFullname = NULL;
3535 pExtent->enmType = VMDKETYPE_ZERO;
3536 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3537 pExtent->uSectorOffset = 0;
3538 pExtent->enmAccess = VMDKACCESS_READWRITE;
3539 pExtent->fMetaDirty = false;
3540 }
3541 }
3542
3543 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3544 (pRaw->uFlags & VBOXHDDRAW_DISK) ?
3545 "fullDevice" : "partitionedDevice");
3546 if (RT_FAILURE(rc))
3547 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3548 return rc;
3549}
3550
3551/**
3552 * Internal: create a regular (i.e. file-backed) VMDK image.
3553 */
3554static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3555 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
3556 unsigned uPercentStart, unsigned uPercentSpan)
3557{
3558 int rc = VINF_SUCCESS;
3559 unsigned cExtents = 1;
3560 uint64_t cbOffset = 0;
3561 uint64_t cbRemaining = cbSize;
3562
3563 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3564 {
3565 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3566 /* Do proper extent computation: need one smaller extent if the total
3567 * size isn't evenly divisible by the split size. */
3568 if (cbSize % VMDK_2G_SPLIT_SIZE)
3569 cExtents++;
3570 }
3571 rc = vmdkCreateExtents(pImage, cExtents);
3572 if (RT_FAILURE(rc))
3573 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3574
3575 /* Basename strings needed for constructing the extent names. */
3576 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3577 AssertPtr(pszBasenameSubstr);
3578 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3579
3580 /* Create separate descriptor file if necessary. */
3581 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3582 {
3583 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3584 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3585 true /* fCreate */));
3586 if (RT_FAILURE(rc))
3587 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3588 }
3589 else
3590 pImage->pFile = NULL;
3591
3592 /* Set up all extents. */
3593 for (unsigned i = 0; i < cExtents; i++)
3594 {
3595 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3596 uint64_t cbExtent = cbRemaining;
3597
3598 /* Set up fullname/basename for extent description. Cannot use StrDup
3599 * for basename, as it is not guaranteed that the memory can be freed
3600 * with RTMemTmpFree, which must be used as in other code paths
3601 * StrDup is not usable. */
3602 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3603 {
3604 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3605 if (!pszBasename)
3606 return VERR_NO_MEMORY;
3607 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3608 pExtent->pszBasename = pszBasename;
3609 }
3610 else
3611 {
3612 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3613 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3614 RTPathStripSuffix(pszBasenameBase);
3615 char *pszTmp;
3616 size_t cbTmp;
3617 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3618 {
3619 if (cExtents == 1)
3620 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3621 pszBasenameSuff);
3622 else
3623 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3624 i+1, pszBasenameSuff);
3625 }
3626 else
3627 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3628 pszBasenameSuff);
3629 RTStrFree(pszBasenameBase);
3630 if (!pszTmp)
3631 return VERR_NO_STR_MEMORY;
3632 cbTmp = strlen(pszTmp) + 1;
3633 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3634 if (!pszBasename)
3635 {
3636 RTStrFree(pszTmp);
3637 return VERR_NO_MEMORY;
3638 }
3639 memcpy(pszBasename, pszTmp, cbTmp);
3640 RTStrFree(pszTmp);
3641 pExtent->pszBasename = pszBasename;
3642 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3643 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3644 }
3645 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3646 if (!pszBasedirectory)
3647 return VERR_NO_STR_MEMORY;
3648 RTPathStripFilename(pszBasedirectory);
3649 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3650 RTStrFree(pszBasedirectory);
3651 if (!pszFullname)
3652 return VERR_NO_STR_MEMORY;
3653 pExtent->pszFullname = pszFullname;
3654
3655 /* Create file for extent. */
3656 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3657 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3658 true /* fCreate */));
3659 if (RT_FAILURE(rc))
3660 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3661 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3662 {
3663 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
3664 0 /* fFlags */, pIfProgress,
3665 uPercentStart + cbOffset * uPercentSpan / cbSize,
3666 cbExtent * uPercentSpan / cbSize);
3667 if (RT_FAILURE(rc))
3668 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3669 }
3670
3671 /* Place descriptor file information (where integrated). */
3672 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3673 {
3674 pExtent->uDescriptorSector = 1;
3675 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3676 /* The descriptor is part of the (only) extent. */
3677 pExtent->pDescData = pImage->pDescData;
3678 pImage->pDescData = NULL;
3679 }
3680
3681 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3682 {
3683 uint64_t cSectorsPerGDE, cSectorsPerGD;
3684 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3685 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3686 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3687 pExtent->cGTEntries = 512;
3688 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3689 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3690 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3691 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3692 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3693 {
3694 /* The spec says version is 1 for all VMDKs, but the vast
3695 * majority of streamOptimized VMDKs actually contain
3696 * version 3 - so go with the majority. Both are accepted. */
3697 pExtent->uVersion = 3;
3698 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3699 }
3700 }
3701 else
3702 {
3703 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3704 pExtent->enmType = VMDKETYPE_VMFS;
3705 else
3706 pExtent->enmType = VMDKETYPE_FLAT;
3707 }
3708
3709 pExtent->enmAccess = VMDKACCESS_READWRITE;
3710 pExtent->fUncleanShutdown = true;
3711 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3712 pExtent->uSectorOffset = 0;
3713 pExtent->fMetaDirty = true;
3714
3715 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3716 {
3717 /* fPreAlloc should never be false because VMware can't use such images. */
3718 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3719 RT_MAX( pExtent->uDescriptorSector
3720 + pExtent->cDescriptorSectors,
3721 1),
3722 true /* fPreAlloc */);
3723 if (RT_FAILURE(rc))
3724 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3725 }
3726
3727 cbOffset += cbExtent;
3728
3729 if (RT_SUCCESS(rc))
3730 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
3731
3732 cbRemaining -= cbExtent;
3733 }
3734
3735 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3736 {
3737 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
3738 * controller type is set in an image. */
3739 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3740 if (RT_FAILURE(rc))
3741 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3742 }
3743
3744 const char *pszDescType = NULL;
3745 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3746 {
3747 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3748 pszDescType = "vmfs";
3749 else
3750 pszDescType = (cExtents == 1)
3751 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3752 }
3753 else
3754 {
3755 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3756 pszDescType = "streamOptimized";
3757 else
3758 {
3759 pszDescType = (cExtents == 1)
3760 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3761 }
3762 }
3763 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3764 pszDescType);
3765 if (RT_FAILURE(rc))
3766 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3767 return rc;
3768}
3769
3770/**
3771 * Internal: Create a real stream optimized VMDK using only linear writes.
3772 */
3773static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
3774{
3775 int rc = vmdkCreateExtents(pImage, 1);
3776 if (RT_FAILURE(rc))
3777 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3778
3779 /* Basename strings needed for constructing the extent names. */
3780 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3781 AssertPtr(pszBasenameSubstr);
3782 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3783
3784 /* No separate descriptor file. */
3785 pImage->pFile = NULL;
3786
3787 /* Set up all extents. */
3788 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3789
3790 /* Set up fullname/basename for extent description. Cannot use StrDup
3791 * for basename, as it is not guaranteed that the memory can be freed
3792 * with RTMemTmpFree, which must be used as in other code paths
3793 * StrDup is not usable. */
3794 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3795 if (!pszBasename)
3796 return VERR_NO_MEMORY;
3797 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3798 pExtent->pszBasename = pszBasename;
3799
3800 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3801 RTPathStripFilename(pszBasedirectory);
3802 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3803 RTStrFree(pszBasedirectory);
3804 if (!pszFullname)
3805 return VERR_NO_STR_MEMORY;
3806 pExtent->pszFullname = pszFullname;
3807
3808 /* Create file for extent. Make it write only, no reading allowed. */
3809 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3810 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3811 true /* fCreate */)
3812 & ~RTFILE_O_READ);
3813 if (RT_FAILURE(rc))
3814 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3815
3816 /* Place descriptor file information. */
3817 pExtent->uDescriptorSector = 1;
3818 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3819 /* The descriptor is part of the (only) extent. */
3820 pExtent->pDescData = pImage->pDescData;
3821 pImage->pDescData = NULL;
3822
3823 uint64_t cSectorsPerGDE, cSectorsPerGD;
3824 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3825 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3826 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3827 pExtent->cGTEntries = 512;
3828 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3829 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3830 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3831 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3832
3833 /* The spec says version is 1 for all VMDKs, but the vast
3834 * majority of streamOptimized VMDKs actually contain
3835 * version 3 - so go with the majority. Both are accepted. */
3836 pExtent->uVersion = 3;
3837 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3838 pExtent->fFooter = true;
3839
3840 pExtent->enmAccess = VMDKACCESS_READONLY;
3841 pExtent->fUncleanShutdown = false;
3842 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3843 pExtent->uSectorOffset = 0;
3844 pExtent->fMetaDirty = true;
3845
3846 /* Create grain directory, without preallocating it straight away. It will
3847 * be constructed on the fly when writing out the data and written when
3848 * closing the image. The end effect is that the full grain directory is
3849 * allocated, which is a requirement of the VMDK specs. */
3850 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
3851 false /* fPreAlloc */);
3852 if (RT_FAILURE(rc))
3853 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3854
3855 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3856 "streamOptimized");
3857 if (RT_FAILURE(rc))
3858 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3859
3860 return rc;
3861}
3862
3863/**
3864 * Initializes the UUID fields in the DDB.
3865 *
3866 * @returns VBox status code.
3867 * @param pImage The VMDK image instance.
3868 */
3869static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
3870{
3871 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3872 if (RT_SUCCESS(rc))
3873 {
3874 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3875 if (RT_SUCCESS(rc))
3876 {
3877 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
3878 &pImage->ModificationUuid);
3879 if (RT_SUCCESS(rc))
3880 {
3881 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
3882 &pImage->ParentModificationUuid);
3883 if (RT_FAILURE(rc))
3884 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3885 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3886 }
3887 else
3888 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3889 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3890 }
3891 else
3892 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3893 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3894 }
3895 else
3896 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3897 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3898
3899 return rc;
3900}
3901
3902/**
3903 * Internal: The actual code for creating any VMDK variant currently in
3904 * existence on hosted environments.
3905 */
3906static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3907 unsigned uImageFlags, const char *pszComment,
3908 PCVDGEOMETRY pPCHSGeometry,
3909 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3910 PVDINTERFACEPROGRESS pIfProgress,
3911 unsigned uPercentStart, unsigned uPercentSpan)
3912{
3913 pImage->uImageFlags = uImageFlags;
3914
3915 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3916 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3917 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3918
3919 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3920 &pImage->Descriptor);
3921 if (RT_SUCCESS(rc))
3922 {
3923 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3924 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3925 {
3926 /* Raw disk image (includes raw partition). */
3927 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3928 /* As the comment is misused, zap it so that no garbage comment
3929 * is set below. */
3930 pszComment = NULL;
3931 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3932 }
3933 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3934 {
3935 /* Stream optimized sparse image (monolithic). */
3936 rc = vmdkCreateStreamImage(pImage, cbSize);
3937 }
3938 else
3939 {
3940 /* Regular fixed or sparse image (monolithic or split). */
3941 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3942 pIfProgress, uPercentStart,
3943 uPercentSpan * 95 / 100);
3944 }
3945
3946 if (RT_SUCCESS(rc))
3947 {
3948 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
3949
3950 pImage->cbSize = cbSize;
3951
3952 for (unsigned i = 0; i < pImage->cExtents; i++)
3953 {
3954 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3955
3956 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3957 pExtent->cNominalSectors, pExtent->enmType,
3958 pExtent->pszBasename, pExtent->uSectorOffset);
3959 if (RT_FAILURE(rc))
3960 {
3961 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3962 break;
3963 }
3964 }
3965
3966 if (RT_SUCCESS(rc))
3967 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
3968
3969 if ( RT_SUCCESS(rc)
3970 && pPCHSGeometry->cCylinders != 0
3971 && pPCHSGeometry->cHeads != 0
3972 && pPCHSGeometry->cSectors != 0)
3973 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
3974
3975 if ( RT_SUCCESS(rc)
3976 && pLCHSGeometry->cCylinders != 0
3977 && pLCHSGeometry->cHeads != 0
3978 && pLCHSGeometry->cSectors != 0)
3979 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
3980
3981 pImage->LCHSGeometry = *pLCHSGeometry;
3982 pImage->PCHSGeometry = *pPCHSGeometry;
3983
3984 pImage->ImageUuid = *pUuid;
3985 RTUuidClear(&pImage->ParentUuid);
3986 RTUuidClear(&pImage->ModificationUuid);
3987 RTUuidClear(&pImage->ParentModificationUuid);
3988
3989 if (RT_SUCCESS(rc))
3990 rc = vmdkCreateImageDdbUuidsInit(pImage);
3991
3992 if (RT_SUCCESS(rc))
3993 rc = vmdkAllocateGrainTableCache(pImage);
3994
3995 if (RT_SUCCESS(rc))
3996 {
3997 rc = vmdkSetImageComment(pImage, pszComment);
3998 if (RT_FAILURE(rc))
3999 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
4000 }
4001
4002 if (RT_SUCCESS(rc))
4003 {
4004 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
4005
4006 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4007 {
4008 /* streamOptimized is a bit special, we cannot trigger the flush
4009 * until all data has been written. So we write the necessary
4010 * information explicitly. */
4011 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
4012 - pImage->Descriptor.aLines[0], 512));
4013 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
4014 if (RT_SUCCESS(rc))
4015 {
4016 rc = vmdkWriteDescriptor(pImage, NULL);
4017 if (RT_FAILURE(rc))
4018 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
4019 }
4020 else
4021 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
4022 }
4023 else
4024 rc = vmdkFlushImage(pImage, NULL);
4025 }
4026 }
4027 }
4028 else
4029 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
4030
4031
4032 if (RT_SUCCESS(rc))
4033 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
4034 else
4035 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
4036 return rc;
4037}
4038
4039/**
4040 * Internal: Update image comment.
4041 */
4042static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4043{
4044 char *pszCommentEncoded = NULL;
4045 if (pszComment)
4046 {
4047 pszCommentEncoded = vmdkEncodeString(pszComment);
4048 if (!pszCommentEncoded)
4049 return VERR_NO_MEMORY;
4050 }
4051
4052 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4053 "ddb.comment", pszCommentEncoded);
4054 if (pszCommentEncoded)
4055 RTStrFree(pszCommentEncoded);
4056 if (RT_FAILURE(rc))
4057 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4058 return VINF_SUCCESS;
4059}
4060
4061/**
4062 * Internal. Clear the grain table buffer for real stream optimized writing.
4063 */
4064static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
4065{
4066 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4067 for (uint32_t i = 0; i < cCacheLines; i++)
4068 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
4069 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4070}
4071
4072/**
4073 * Internal. Flush the grain table buffer for real stream optimized writing.
4074 */
4075static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4076 uint32_t uGDEntry)
4077{
4078 int rc = VINF_SUCCESS;
4079 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4080
4081 /* VMware does not write out completely empty grain tables in the case
4082 * of streamOptimized images, which according to my interpretation of
4083 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
4084 * handle it without problems do it the same way and save some bytes. */
4085 bool fAllZero = true;
4086 for (uint32_t i = 0; i < cCacheLines; i++)
4087 {
4088 /* Convert the grain table to little endian in place, as it will not
4089 * be used at all after this function has been called. */
4090 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4091 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4092 if (*pGTTmp)
4093 {
4094 fAllZero = false;
4095 break;
4096 }
4097 if (!fAllZero)
4098 break;
4099 }
4100 if (fAllZero)
4101 return VINF_SUCCESS;
4102
4103 uint64_t uFileOffset = pExtent->uAppendPosition;
4104 if (!uFileOffset)
4105 return VERR_INTERNAL_ERROR;
4106 /* Align to sector, as the previous write could have been any size. */
4107 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4108
4109 /* Grain table marker. */
4110 uint8_t aMarker[512];
4111 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4112 memset(pMarker, '\0', sizeof(aMarker));
4113 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
4114 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
4115 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4116 aMarker, sizeof(aMarker));
4117 AssertRC(rc);
4118 uFileOffset += 512;
4119
4120 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
4121 return VERR_INTERNAL_ERROR;
4122
4123 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4124
4125 for (uint32_t i = 0; i < cCacheLines; i++)
4126 {
4127 /* Convert the grain table to little endian in place, as it will not
4128 * be used at all after this function has been called. */
4129 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4130 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4131 *pGTTmp = RT_H2LE_U32(*pGTTmp);
4132
4133 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4134 &pImage->pGTCache->aGTCache[i].aGTData[0],
4135 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4136 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
4137 if (RT_FAILURE(rc))
4138 break;
4139 }
4140 Assert(!(uFileOffset % 512));
4141 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
4142 return rc;
4143}
4144
4145/**
4146 * Internal. Free all allocated space for representing an image, and optionally
4147 * delete the image from disk.
4148 */
4149static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4150{
4151 int rc = VINF_SUCCESS;
4152
4153 /* Freeing a never allocated image (e.g. because the open failed) is
4154 * not signalled as an error. After all nothing bad happens. */
4155 if (pImage)
4156 {
4157 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4158 {
4159 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4160 {
4161 /* Check if all extents are clean. */
4162 for (unsigned i = 0; i < pImage->cExtents; i++)
4163 {
4164 Assert(!pImage->pExtents[i].fUncleanShutdown);
4165 }
4166 }
4167 else
4168 {
4169 /* Mark all extents as clean. */
4170 for (unsigned i = 0; i < pImage->cExtents; i++)
4171 {
4172 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4173 && pImage->pExtents[i].fUncleanShutdown)
4174 {
4175 pImage->pExtents[i].fUncleanShutdown = false;
4176 pImage->pExtents[i].fMetaDirty = true;
4177 }
4178
4179 /* From now on it's not safe to append any more data. */
4180 pImage->pExtents[i].uAppendPosition = 0;
4181 }
4182 }
4183 }
4184
4185 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4186 {
4187 /* No need to write any pending data if the file will be deleted
4188 * or if the new file wasn't successfully created. */
4189 if ( !fDelete && pImage->pExtents
4190 && pImage->pExtents[0].cGTEntries
4191 && pImage->pExtents[0].uAppendPosition)
4192 {
4193 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4194 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4195 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
4196 AssertRC(rc);
4197 vmdkStreamClearGT(pImage, pExtent);
4198 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
4199 {
4200 rc = vmdkStreamFlushGT(pImage, pExtent, i);
4201 AssertRC(rc);
4202 }
4203
4204 uint64_t uFileOffset = pExtent->uAppendPosition;
4205 if (!uFileOffset)
4206 return VERR_INTERNAL_ERROR;
4207 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4208
4209 /* From now on it's not safe to append any more data. */
4210 pExtent->uAppendPosition = 0;
4211
4212 /* Grain directory marker. */
4213 uint8_t aMarker[512];
4214 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4215 memset(pMarker, '\0', sizeof(aMarker));
4216 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
4217 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
4218 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4219 aMarker, sizeof(aMarker));
4220 AssertRC(rc);
4221 uFileOffset += 512;
4222
4223 /* Write grain directory in little endian style. The array will
4224 * not be used after this, so convert in place. */
4225 uint32_t *pGDTmp = pExtent->pGD;
4226 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
4227 *pGDTmp = RT_H2LE_U32(*pGDTmp);
4228 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4229 uFileOffset, pExtent->pGD,
4230 pExtent->cGDEntries * sizeof(uint32_t));
4231 AssertRC(rc);
4232
4233 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
4234 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
4235 uFileOffset = RT_ALIGN_64( uFileOffset
4236 + pExtent->cGDEntries * sizeof(uint32_t),
4237 512);
4238
4239 /* Footer marker. */
4240 memset(pMarker, '\0', sizeof(aMarker));
4241 pMarker->uSector = VMDK_BYTE2SECTOR(512);
4242 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
4243 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4244 uFileOffset, aMarker, sizeof(aMarker));
4245 AssertRC(rc);
4246
4247 uFileOffset += 512;
4248 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
4249 AssertRC(rc);
4250
4251 uFileOffset += 512;
4252 /* End-of-stream marker. */
4253 memset(pMarker, '\0', sizeof(aMarker));
4254 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4255 uFileOffset, aMarker, sizeof(aMarker));
4256 AssertRC(rc);
4257 }
4258 }
4259 else if (!fDelete)
4260 vmdkFlushImage(pImage, NULL);
4261
4262 if (pImage->pExtents != NULL)
4263 {
4264 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4265 {
4266 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4267 if (RT_SUCCESS(rc))
4268 rc = rc2; /* Propogate any error when closing the file. */
4269 }
4270 RTMemFree(pImage->pExtents);
4271 pImage->pExtents = NULL;
4272 }
4273 pImage->cExtents = 0;
4274 if (pImage->pFile != NULL)
4275 {
4276 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
4277 if (RT_SUCCESS(rc))
4278 rc = rc2; /* Propogate any error when closing the file. */
4279 }
4280 int rc2 = vmdkFileCheckAllClose(pImage);
4281 if (RT_SUCCESS(rc))
4282 rc = rc2; /* Propogate any error when closing the file. */
4283
4284 if (pImage->pGTCache)
4285 {
4286 RTMemFree(pImage->pGTCache);
4287 pImage->pGTCache = NULL;
4288 }
4289 if (pImage->pDescData)
4290 {
4291 RTMemFree(pImage->pDescData);
4292 pImage->pDescData = NULL;
4293 }
4294 }
4295
4296 LogFlowFunc(("returns %Rrc\n", rc));
4297 return rc;
4298}
4299
4300/**
4301 * Internal. Flush image data (and metadata) to disk.
4302 */
4303static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4304{
4305 PVMDKEXTENT pExtent;
4306 int rc = VINF_SUCCESS;
4307
4308 /* Update descriptor if changed. */
4309 if (pImage->Descriptor.fDirty)
4310 rc = vmdkWriteDescriptor(pImage, pIoCtx);
4311
4312 if (RT_SUCCESS(rc))
4313 {
4314 for (unsigned i = 0; i < pImage->cExtents; i++)
4315 {
4316 pExtent = &pImage->pExtents[i];
4317 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4318 {
4319 switch (pExtent->enmType)
4320 {
4321 case VMDKETYPE_HOSTED_SPARSE:
4322 if (!pExtent->fFooter)
4323 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
4324 else
4325 {
4326 uint64_t uFileOffset = pExtent->uAppendPosition;
4327 /* Simply skip writing anything if the streamOptimized
4328 * image hasn't been just created. */
4329 if (!uFileOffset)
4330 break;
4331 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4332 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
4333 uFileOffset, pIoCtx);
4334 }
4335 break;
4336 case VMDKETYPE_VMFS:
4337 case VMDKETYPE_FLAT:
4338 /* Nothing to do. */
4339 break;
4340 case VMDKETYPE_ZERO:
4341 default:
4342 AssertMsgFailed(("extent with type %d marked as dirty\n",
4343 pExtent->enmType));
4344 break;
4345 }
4346 }
4347
4348 if (RT_FAILURE(rc))
4349 break;
4350
4351 switch (pExtent->enmType)
4352 {
4353 case VMDKETYPE_HOSTED_SPARSE:
4354 case VMDKETYPE_VMFS:
4355 case VMDKETYPE_FLAT:
4356 /** @todo implement proper path absolute check. */
4357 if ( pExtent->pFile != NULL
4358 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4359 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4360 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
4361 NULL, NULL);
4362 break;
4363 case VMDKETYPE_ZERO:
4364 /* No need to do anything for this extent. */
4365 break;
4366 default:
4367 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4368 break;
4369 }
4370 }
4371 }
4372
4373 return rc;
4374}
4375
4376/**
4377 * Internal. Find extent corresponding to the sector number in the disk.
4378 */
4379static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4380 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4381{
4382 PVMDKEXTENT pExtent = NULL;
4383 int rc = VINF_SUCCESS;
4384
4385 for (unsigned i = 0; i < pImage->cExtents; i++)
4386 {
4387 if (offSector < pImage->pExtents[i].cNominalSectors)
4388 {
4389 pExtent = &pImage->pExtents[i];
4390 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4391 break;
4392 }
4393 offSector -= pImage->pExtents[i].cNominalSectors;
4394 }
4395
4396 if (pExtent)
4397 *ppExtent = pExtent;
4398 else
4399 rc = VERR_IO_SECTOR_NOT_FOUND;
4400
4401 return rc;
4402}
4403
4404/**
4405 * Internal. Hash function for placing the grain table hash entries.
4406 */
4407static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4408 unsigned uExtent)
4409{
4410 /** @todo this hash function is quite simple, maybe use a better one which
4411 * scrambles the bits better. */
4412 return (uSector + uExtent) % pCache->cEntries;
4413}
4414
4415/**
4416 * Internal. Get sector number in the extent file from the relative sector
4417 * number in the extent.
4418 */
4419static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
4420 PVMDKEXTENT pExtent, uint64_t uSector,
4421 uint64_t *puExtentSector)
4422{
4423 PVMDKGTCACHE pCache = pImage->pGTCache;
4424 uint64_t uGDIndex, uGTSector, uGTBlock;
4425 uint32_t uGTHash, uGTBlockIndex;
4426 PVMDKGTCACHEENTRY pGTCacheEntry;
4427 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4428 int rc;
4429
4430 /* For newly created and readonly/sequentially opened streamOptimized
4431 * images this must be a no-op, as the grain directory is not there. */
4432 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
4433 && pExtent->uAppendPosition)
4434 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
4435 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
4436 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
4437 {
4438 *puExtentSector = 0;
4439 return VINF_SUCCESS;
4440 }
4441
4442 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4443 if (uGDIndex >= pExtent->cGDEntries)
4444 return VERR_OUT_OF_RANGE;
4445 uGTSector = pExtent->pGD[uGDIndex];
4446 if (!uGTSector)
4447 {
4448 /* There is no grain table referenced by this grain directory
4449 * entry. So there is absolutely no data in this area. */
4450 *puExtentSector = 0;
4451 return VINF_SUCCESS;
4452 }
4453
4454 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4455 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4456 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4457 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4458 || pGTCacheEntry->uGTBlock != uGTBlock)
4459 {
4460 /* Cache miss, fetch data from disk. */
4461 PVDMETAXFER pMetaXfer;
4462 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4463 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4464 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
4465 if (RT_FAILURE(rc))
4466 return rc;
4467 /* We can release the metadata transfer immediately. */
4468 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
4469 pGTCacheEntry->uExtent = pExtent->uExtent;
4470 pGTCacheEntry->uGTBlock = uGTBlock;
4471 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4472 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4473 }
4474 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4475 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4476 if (uGrainSector)
4477 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4478 else
4479 *puExtentSector = 0;
4480 return VINF_SUCCESS;
4481}
4482
4483/**
4484 * Internal. Writes the grain and also if necessary the grain tables.
4485 * Uses the grain table cache as a true grain table.
4486 */
4487static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4488 uint64_t uSector, PVDIOCTX pIoCtx,
4489 uint64_t cbWrite)
4490{
4491 uint32_t uGrain;
4492 uint32_t uGDEntry, uLastGDEntry;
4493 uint32_t cbGrain = 0;
4494 uint32_t uCacheLine, uCacheEntry;
4495 const void *pData;
4496 int rc;
4497
4498 /* Very strict requirements: always write at least one full grain, with
4499 * proper alignment. Everything else would require reading of already
4500 * written data, which we don't support for obvious reasons. The only
4501 * exception is the last grain, and only if the image size specifies
4502 * that only some portion holds data. In any case the write must be
4503 * within the image limits, no "overshoot" allowed. */
4504 if ( cbWrite == 0
4505 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
4506 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
4507 || uSector % pExtent->cSectorsPerGrain
4508 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
4509 return VERR_INVALID_PARAMETER;
4510
4511 /* Clip write range to at most the rest of the grain. */
4512 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
4513
4514 /* Do not allow to go back. */
4515 uGrain = uSector / pExtent->cSectorsPerGrain;
4516 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4517 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
4518 uGDEntry = uGrain / pExtent->cGTEntries;
4519 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4520 if (uGrain < pExtent->uLastGrainAccess)
4521 return VERR_VD_VMDK_INVALID_WRITE;
4522
4523 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
4524 * to allocate something, we also need to detect the situation ourself. */
4525 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
4526 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
4527 return VINF_SUCCESS;
4528
4529 if (uGDEntry != uLastGDEntry)
4530 {
4531 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
4532 if (RT_FAILURE(rc))
4533 return rc;
4534 vmdkStreamClearGT(pImage, pExtent);
4535 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
4536 {
4537 rc = vmdkStreamFlushGT(pImage, pExtent, i);
4538 if (RT_FAILURE(rc))
4539 return rc;
4540 }
4541 }
4542
4543 uint64_t uFileOffset;
4544 uFileOffset = pExtent->uAppendPosition;
4545 if (!uFileOffset)
4546 return VERR_INTERNAL_ERROR;
4547 /* Align to sector, as the previous write could have been any size. */
4548 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4549
4550 /* Paranoia check: extent type, grain table buffer presence and
4551 * grain table buffer space. Also grain table entry must be clear. */
4552 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
4553 || !pImage->pGTCache
4554 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
4555 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
4556 return VERR_INTERNAL_ERROR;
4557
4558 /* Update grain table entry. */
4559 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4560
4561 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4562 {
4563 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
4564 memset((char *)pExtent->pvGrain + cbWrite, '\0',
4565 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
4566 pData = pExtent->pvGrain;
4567 }
4568 else
4569 {
4570 RTSGSEG Segment;
4571 unsigned cSegments = 1;
4572 size_t cbSeg = 0;
4573
4574 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
4575 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4576 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4577 pData = Segment.pvSeg;
4578 }
4579 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
4580 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
4581 uSector, &cbGrain);
4582 if (RT_FAILURE(rc))
4583 {
4584 pExtent->uGrainSectorAbs = 0;
4585 AssertRC(rc);
4586 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
4587 }
4588 pExtent->uLastGrainAccess = uGrain;
4589 pExtent->uAppendPosition += cbGrain;
4590
4591 return rc;
4592}
4593
4594/**
4595 * Internal: Updates the grain table during grain allocation.
4596 */
4597static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
4598 PVMDKGRAINALLOCASYNC pGrainAlloc)
4599{
4600 int rc = VINF_SUCCESS;
4601 PVMDKGTCACHE pCache = pImage->pGTCache;
4602 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4603 uint32_t uGTHash, uGTBlockIndex;
4604 uint64_t uGTSector, uRGTSector, uGTBlock;
4605 uint64_t uSector = pGrainAlloc->uSector;
4606 PVMDKGTCACHEENTRY pGTCacheEntry;
4607
4608 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
4609 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
4610
4611 uGTSector = pGrainAlloc->uGTSector;
4612 uRGTSector = pGrainAlloc->uRGTSector;
4613 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4614
4615 /* Update the grain table (and the cache). */
4616 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4617 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4618 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4619 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4620 || pGTCacheEntry->uGTBlock != uGTBlock)
4621 {
4622 /* Cache miss, fetch data from disk. */
4623 LogFlow(("Cache miss, fetch data from disk\n"));
4624 PVDMETAXFER pMetaXfer = NULL;
4625 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4626 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4627 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4628 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
4629 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4630 {
4631 pGrainAlloc->cIoXfersPending++;
4632 pGrainAlloc->fGTUpdateNeeded = true;
4633 /* Leave early, we will be called again after the read completed. */
4634 LogFlowFunc(("Metadata read in progress, leaving\n"));
4635 return rc;
4636 }
4637 else if (RT_FAILURE(rc))
4638 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4639 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
4640 pGTCacheEntry->uExtent = pExtent->uExtent;
4641 pGTCacheEntry->uGTBlock = uGTBlock;
4642 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4643 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4644 }
4645 else
4646 {
4647 /* Cache hit. Convert grain table block back to disk format, otherwise
4648 * the code below will write garbage for all but the updated entry. */
4649 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4650 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4651 }
4652 pGrainAlloc->fGTUpdateNeeded = false;
4653 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4654 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
4655 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
4656 /* Update grain table on disk. */
4657 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4658 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4659 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4660 vmdkAllocGrainComplete, pGrainAlloc);
4661 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4662 pGrainAlloc->cIoXfersPending++;
4663 else if (RT_FAILURE(rc))
4664 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4665 if (pExtent->pRGD)
4666 {
4667 /* Update backup grain table on disk. */
4668 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4669 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4670 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4671 vmdkAllocGrainComplete, pGrainAlloc);
4672 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4673 pGrainAlloc->cIoXfersPending++;
4674 else if (RT_FAILURE(rc))
4675 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4676 }
4677
4678 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4679 return rc;
4680}
4681
4682/**
4683 * Internal - complete the grain allocation by updating disk grain table if required.
4684 */
4685static int vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
4686{
4687 RT_NOREF1(rcReq);
4688 int rc = VINF_SUCCESS;
4689 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4690 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
4691
4692 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
4693 pBackendData, pIoCtx, pvUser, rcReq));
4694
4695 pGrainAlloc->cIoXfersPending--;
4696 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
4697 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
4698
4699 if (!pGrainAlloc->cIoXfersPending)
4700 {
4701 /* Grain allocation completed. */
4702 RTMemFree(pGrainAlloc);
4703 }
4704
4705 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
4706 return rc;
4707}
4708
4709/**
4710 * Internal. Allocates a new grain table (if necessary).
4711 */
4712static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
4713 uint64_t uSector, uint64_t cbWrite)
4714{
4715 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
4716 uint64_t uGDIndex, uGTSector, uRGTSector;
4717 uint64_t uFileOffset;
4718 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
4719 int rc;
4720
4721 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
4722 pCache, pExtent, pIoCtx, uSector, cbWrite));
4723
4724 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
4725 if (!pGrainAlloc)
4726 return VERR_NO_MEMORY;
4727
4728 pGrainAlloc->pExtent = pExtent;
4729 pGrainAlloc->uSector = uSector;
4730
4731 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4732 if (uGDIndex >= pExtent->cGDEntries)
4733 {
4734 RTMemFree(pGrainAlloc);
4735 return VERR_OUT_OF_RANGE;
4736 }
4737 uGTSector = pExtent->pGD[uGDIndex];
4738 if (pExtent->pRGD)
4739 uRGTSector = pExtent->pRGD[uGDIndex];
4740 else
4741 uRGTSector = 0; /**< avoid compiler warning */
4742 if (!uGTSector)
4743 {
4744 LogFlow(("Allocating new grain table\n"));
4745
4746 /* There is no grain table referenced by this grain directory
4747 * entry. So there is absolutely no data in this area. Allocate
4748 * a new grain table and put the reference to it in the GDs. */
4749 uFileOffset = pExtent->uAppendPosition;
4750 if (!uFileOffset)
4751 {
4752 RTMemFree(pGrainAlloc);
4753 return VERR_INTERNAL_ERROR;
4754 }
4755 Assert(!(uFileOffset % 512));
4756
4757 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4758 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4759
4760 /* Normally the grain table is preallocated for hosted sparse extents
4761 * that support more than 32 bit sector numbers. So this shouldn't
4762 * ever happen on a valid extent. */
4763 if (uGTSector > UINT32_MAX)
4764 {
4765 RTMemFree(pGrainAlloc);
4766 return VERR_VD_VMDK_INVALID_HEADER;
4767 }
4768
4769 /* Write grain table by writing the required number of grain table
4770 * cache chunks. Allocate memory dynamically here or we flood the
4771 * metadata cache with very small entries. */
4772 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
4773 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
4774
4775 if (!paGTDataTmp)
4776 {
4777 RTMemFree(pGrainAlloc);
4778 return VERR_NO_MEMORY;
4779 }
4780
4781 memset(paGTDataTmp, '\0', cbGTDataTmp);
4782 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4783 VMDK_SECTOR2BYTE(uGTSector),
4784 paGTDataTmp, cbGTDataTmp, pIoCtx,
4785 vmdkAllocGrainComplete, pGrainAlloc);
4786 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4787 pGrainAlloc->cIoXfersPending++;
4788 else if (RT_FAILURE(rc))
4789 {
4790 RTMemTmpFree(paGTDataTmp);
4791 RTMemFree(pGrainAlloc);
4792 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4793 }
4794 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
4795 + cbGTDataTmp, 512);
4796
4797 if (pExtent->pRGD)
4798 {
4799 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4800 uFileOffset = pExtent->uAppendPosition;
4801 if (!uFileOffset)
4802 return VERR_INTERNAL_ERROR;
4803 Assert(!(uFileOffset % 512));
4804 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4805
4806 /* Normally the redundant grain table is preallocated for hosted
4807 * sparse extents that support more than 32 bit sector numbers. So
4808 * this shouldn't ever happen on a valid extent. */
4809 if (uRGTSector > UINT32_MAX)
4810 {
4811 RTMemTmpFree(paGTDataTmp);
4812 return VERR_VD_VMDK_INVALID_HEADER;
4813 }
4814
4815 /* Write grain table by writing the required number of grain table
4816 * cache chunks. Allocate memory dynamically here or we flood the
4817 * metadata cache with very small entries. */
4818 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4819 VMDK_SECTOR2BYTE(uRGTSector),
4820 paGTDataTmp, cbGTDataTmp, pIoCtx,
4821 vmdkAllocGrainComplete, pGrainAlloc);
4822 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4823 pGrainAlloc->cIoXfersPending++;
4824 else if (RT_FAILURE(rc))
4825 {
4826 RTMemTmpFree(paGTDataTmp);
4827 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4828 }
4829
4830 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
4831 }
4832
4833 RTMemTmpFree(paGTDataTmp);
4834
4835 /* Update the grain directory on disk (doing it before writing the
4836 * grain table will result in a garbled extent if the operation is
4837 * aborted for some reason. Otherwise the worst that can happen is
4838 * some unused sectors in the extent. */
4839 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4840 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4841 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4842 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
4843 vmdkAllocGrainComplete, pGrainAlloc);
4844 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4845 pGrainAlloc->cIoXfersPending++;
4846 else if (RT_FAILURE(rc))
4847 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4848 if (pExtent->pRGD)
4849 {
4850 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4851 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4852 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
4853 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
4854 vmdkAllocGrainComplete, pGrainAlloc);
4855 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4856 pGrainAlloc->cIoXfersPending++;
4857 else if (RT_FAILURE(rc))
4858 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4859 }
4860
4861 /* As the final step update the in-memory copy of the GDs. */
4862 pExtent->pGD[uGDIndex] = uGTSector;
4863 if (pExtent->pRGD)
4864 pExtent->pRGD[uGDIndex] = uRGTSector;
4865 }
4866
4867 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4868 pGrainAlloc->uGTSector = uGTSector;
4869 pGrainAlloc->uRGTSector = uRGTSector;
4870
4871 uFileOffset = pExtent->uAppendPosition;
4872 if (!uFileOffset)
4873 return VERR_INTERNAL_ERROR;
4874 Assert(!(uFileOffset % 512));
4875
4876 pGrainAlloc->uGrainOffset = uFileOffset;
4877
4878 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4879 {
4880 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
4881 ("Accesses to stream optimized images must be synchronous\n"),
4882 VERR_INVALID_STATE);
4883
4884 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4885 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
4886
4887 /* Invalidate cache, just in case some code incorrectly allows mixing
4888 * of reads and writes. Normally shouldn't be needed. */
4889 pExtent->uGrainSectorAbs = 0;
4890
4891 /* Write compressed data block and the markers. */
4892 uint32_t cbGrain = 0;
4893 size_t cbSeg = 0;
4894 RTSGSEG Segment;
4895 unsigned cSegments = 1;
4896
4897 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
4898 &cSegments, cbWrite);
4899 Assert(cbSeg == cbWrite);
4900
4901 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
4902 Segment.pvSeg, cbWrite, uSector, &cbGrain);
4903 if (RT_FAILURE(rc))
4904 {
4905 AssertRC(rc);
4906 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4907 }
4908 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
4909 pExtent->uAppendPosition += cbGrain;
4910 }
4911 else
4912 {
4913 /* Write the data. Always a full grain, or we're in big trouble. */
4914 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
4915 uFileOffset, pIoCtx, cbWrite,
4916 vmdkAllocGrainComplete, pGrainAlloc);
4917 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4918 pGrainAlloc->cIoXfersPending++;
4919 else if (RT_FAILURE(rc))
4920 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4921
4922 pExtent->uAppendPosition += cbWrite;
4923 }
4924
4925 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
4926
4927 if (!pGrainAlloc->cIoXfersPending)
4928 {
4929 /* Grain allocation completed. */
4930 RTMemFree(pGrainAlloc);
4931 }
4932
4933 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4934
4935 return rc;
4936}
4937
4938/**
4939 * Internal. Reads the contents by sequentially going over the compressed
4940 * grains (hoping that they are in sequence).
4941 */
4942static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4943 uint64_t uSector, PVDIOCTX pIoCtx,
4944 uint64_t cbRead)
4945{
4946 int rc;
4947
4948 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
4949 pImage, pExtent, uSector, pIoCtx, cbRead));
4950
4951 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
4952 ("Async I/O not supported for sequential stream optimized images\n"),
4953 VERR_INVALID_STATE);
4954
4955 /* Do not allow to go back. */
4956 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
4957 if (uGrain < pExtent->uLastGrainAccess)
4958 return VERR_VD_VMDK_INVALID_STATE;
4959 pExtent->uLastGrainAccess = uGrain;
4960
4961 /* After a previous error do not attempt to recover, as it would need
4962 * seeking (in the general case backwards which is forbidden). */
4963 if (!pExtent->uGrainSectorAbs)
4964 return VERR_VD_VMDK_INVALID_STATE;
4965
4966 /* Check if we need to read something from the image or if what we have
4967 * in the buffer is good to fulfill the request. */
4968 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
4969 {
4970 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
4971 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
4972
4973 /* Get the marker from the next data block - and skip everything which
4974 * is not a compressed grain. If it's a compressed grain which is for
4975 * the requested sector (or after), read it. */
4976 VMDKMARKER Marker;
4977 do
4978 {
4979 RT_ZERO(Marker);
4980 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
4981 VMDK_SECTOR2BYTE(uGrainSectorAbs),
4982 &Marker, RT_OFFSETOF(VMDKMARKER, uType));
4983 if (RT_FAILURE(rc))
4984 return rc;
4985 Marker.uSector = RT_LE2H_U64(Marker.uSector);
4986 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
4987
4988 if (Marker.cbSize == 0)
4989 {
4990 /* A marker for something else than a compressed grain. */
4991 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
4992 VMDK_SECTOR2BYTE(uGrainSectorAbs)
4993 + RT_OFFSETOF(VMDKMARKER, uType),
4994 &Marker.uType, sizeof(Marker.uType));
4995 if (RT_FAILURE(rc))
4996 return rc;
4997 Marker.uType = RT_LE2H_U32(Marker.uType);
4998 switch (Marker.uType)
4999 {
5000 case VMDK_MARKER_EOS:
5001 uGrainSectorAbs++;
5002 /* Read (or mostly skip) to the end of file. Uses the
5003 * Marker (LBA sector) as it is unused anyway. This
5004 * makes sure that really everything is read in the
5005 * success case. If this read fails it means the image
5006 * is truncated, but this is harmless so ignore. */
5007 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5008 VMDK_SECTOR2BYTE(uGrainSectorAbs)
5009 + 511,
5010 &Marker.uSector, 1);
5011 break;
5012 case VMDK_MARKER_GT:
5013 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
5014 break;
5015 case VMDK_MARKER_GD:
5016 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
5017 break;
5018 case VMDK_MARKER_FOOTER:
5019 uGrainSectorAbs += 2;
5020 break;
5021 case VMDK_MARKER_UNSPECIFIED:
5022 /* Skip over the contents of the unspecified marker
5023 * type 4 which exists in some vSphere created files. */
5024 /** @todo figure out what the payload means. */
5025 uGrainSectorAbs += 1;
5026 break;
5027 default:
5028 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
5029 pExtent->uGrainSectorAbs = 0;
5030 return VERR_VD_VMDK_INVALID_STATE;
5031 }
5032 pExtent->cbGrainStreamRead = 0;
5033 }
5034 else
5035 {
5036 /* A compressed grain marker. If it is at/after what we're
5037 * interested in read and decompress data. */
5038 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
5039 {
5040 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_OFFSETOF(VMDKMARKER, uType), 512));
5041 continue;
5042 }
5043 uint64_t uLBA = 0;
5044 uint32_t cbGrainStreamRead = 0;
5045 rc = vmdkFileInflateSync(pImage, pExtent,
5046 VMDK_SECTOR2BYTE(uGrainSectorAbs),
5047 pExtent->pvGrain,
5048 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5049 &Marker, &uLBA, &cbGrainStreamRead);
5050 if (RT_FAILURE(rc))
5051 {
5052 pExtent->uGrainSectorAbs = 0;
5053 return rc;
5054 }
5055 if ( pExtent->uGrain
5056 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
5057 {
5058 pExtent->uGrainSectorAbs = 0;
5059 return VERR_VD_VMDK_INVALID_STATE;
5060 }
5061 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
5062 pExtent->cbGrainStreamRead = cbGrainStreamRead;
5063 break;
5064 }
5065 } while (Marker.uType != VMDK_MARKER_EOS);
5066
5067 pExtent->uGrainSectorAbs = uGrainSectorAbs;
5068
5069 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
5070 {
5071 pExtent->uGrain = UINT32_MAX;
5072 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
5073 * the next read would try to get more data, and we're at EOF. */
5074 pExtent->cbGrainStreamRead = 1;
5075 }
5076 }
5077
5078 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
5079 {
5080 /* The next data block we have is not for this area, so just return
5081 * that there is no data. */
5082 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
5083 return VERR_VD_BLOCK_FREE;
5084 }
5085
5086 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
5087 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
5088 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
5089 cbRead);
5090 LogFlowFunc(("returns VINF_SUCCESS\n"));
5091 return VINF_SUCCESS;
5092}
5093
5094/**
5095 * Replaces a fragment of a string with the specified string.
5096 *
5097 * @returns Pointer to the allocated UTF-8 string.
5098 * @param pszWhere UTF-8 string to search in.
5099 * @param pszWhat UTF-8 string to search for.
5100 * @param pszByWhat UTF-8 string to replace the found string with.
5101 */
5102static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5103 const char *pszByWhat)
5104{
5105 AssertPtr(pszWhere);
5106 AssertPtr(pszWhat);
5107 AssertPtr(pszByWhat);
5108 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5109 if (!pszFoundStr)
5110 return NULL;
5111 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5112 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5113 if (pszNewStr)
5114 {
5115 char *pszTmp = pszNewStr;
5116 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5117 pszTmp += pszFoundStr - pszWhere;
5118 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5119 pszTmp += strlen(pszByWhat);
5120 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5121 }
5122 return pszNewStr;
5123}
5124
5125
5126/** @copydoc VDIMAGEBACKEND::pfnProbe */
5127static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5128 PVDINTERFACE pVDIfsImage, VDTYPE *penmType)
5129{
5130 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
5131 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
5132
5133 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
5134
5135 int rc = VINF_SUCCESS;
5136 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5137 if (RT_LIKELY(pImage))
5138 {
5139 pImage->pszFilename = pszFilename;
5140 pImage->pFile = NULL;
5141 pImage->pExtents = NULL;
5142 pImage->pFiles = NULL;
5143 pImage->pGTCache = NULL;
5144 pImage->pDescData = NULL;
5145 pImage->pVDIfsDisk = pVDIfsDisk;
5146 pImage->pVDIfsImage = pVDIfsImage;
5147 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5148 * much as possible in vmdkOpenImage. */
5149 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5150 vmdkFreeImage(pImage, false);
5151 RTMemFree(pImage);
5152
5153 if (RT_SUCCESS(rc))
5154 *penmType = VDTYPE_HDD;
5155 }
5156 else
5157 rc = VERR_NO_MEMORY;
5158
5159 LogFlowFunc(("returns %Rrc\n", rc));
5160 return rc;
5161}
5162
5163/** @copydoc VDIMAGEBACKEND::pfnOpen */
5164static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5165 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5166 VDTYPE enmType, void **ppBackendData)
5167{
5168 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
5169
5170 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
5171 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
5172 int rc;
5173
5174 /* Check open flags. All valid flags are supported. */
5175 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
5176 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
5177
5178 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5179 if (RT_LIKELY(pImage))
5180 {
5181 pImage->pszFilename = pszFilename;
5182 pImage->pFile = NULL;
5183 pImage->pExtents = NULL;
5184 pImage->pFiles = NULL;
5185 pImage->pGTCache = NULL;
5186 pImage->pDescData = NULL;
5187 pImage->pVDIfsDisk = pVDIfsDisk;
5188 pImage->pVDIfsImage = pVDIfsImage;
5189
5190 rc = vmdkOpenImage(pImage, uOpenFlags);
5191 if (RT_SUCCESS(rc))
5192 *ppBackendData = pImage;
5193 else
5194 RTMemFree(pImage);
5195 }
5196 else
5197 rc = VERR_NO_MEMORY;
5198
5199 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5200 return rc;
5201}
5202
/** @copydoc VDIMAGEBACKEND::pfnCreate */
static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
                                    unsigned uImageFlags, const char *pszComment,
                                    PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                                    PCRTUUID pUuid, unsigned uOpenFlags,
                                    unsigned uPercentStart, unsigned uPercentSpan,
                                    PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                                    PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
                                    void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
                 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
    int rc;

    /* Check the VD container type and image flags. */
    if (   enmType != VDTYPE_HDD
        || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
        return VERR_VD_INVALID_TYPE;

    /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
    if (   !cbSize
        || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))
        return VERR_VD_INVALID_SIZE;

    /* Check image flags for invalid combinations. */
    /* Stream optimized images may only additionally carry the DIFF flag. */
    if (   (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
        return VERR_INVALID_PARAMETER;

    /* Check open flags. All valid flags are supported. */
    AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(   VALID_PTR(pszFilename)
                 && *pszFilename
                 && VALID_PTR(pPCHSGeometry)
                 && VALID_PTR(pLCHSGeometry), VERR_INVALID_PARAMETER);

    PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
    if (RT_LIKELY(pImage))
    {
        PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);

        pImage->pszFilename = pszFilename;
        pImage->pFile = NULL;
        pImage->pExtents = NULL;
        pImage->pFiles = NULL;
        pImage->pGTCache = NULL;
        pImage->pDescData = NULL;
        pImage->pVDIfsDisk = pVDIfsDisk;
        pImage->pVDIfsImage = pVDIfsImage;
        /* Descriptors for split images can be pretty large, especially if the
         * filename is long. So prepare for the worst, and allocate quite some
         * memory for the descriptor in this case. */
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
        else
            pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
                                 pPCHSGeometry, pLCHSGeometry, pUuid,
                                 pIfProgress, uPercentStart, uPercentSpan);
            if (RT_SUCCESS(rc))
            {
                /* So far the image is opened in read/write mode. Make sure the
                 * image is opened in read-only mode if the caller requested that. */
                if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
                {
                    vmdkFreeImage(pImage, false);
                    rc = vmdkOpenImage(pImage, uOpenFlags);
                }

                if (RT_SUCCESS(rc))
                    *ppBackendData = pImage;
            }

            /* NOTE(review): assumes pImage->pDescData is still the buffer
             * allocated above (or NULL) when vmdkCreateImage/vmdkOpenImage
             * fail - TODO confirm vmdkFreeImage does not leave a stale
             * pointer here, otherwise this would double free. */
            if (RT_FAILURE(rc))
                RTMemFree(pImage->pDescData);
        }
        else
            rc = VERR_NO_MEMORY;

        if (RT_FAILURE(rc))
            RTMemFree(pImage);
    }
    else
        rc = VERR_NO_MEMORY;

    LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
    return rc;
}
5294
5295/**
5296 * Prepares the state for renaming a VMDK image, setting up the state and allocating
5297 * memory.
5298 *
5299 * @returns VBox status code.
5300 * @param pImage VMDK image instance.
5301 * @param pRenameState The state to initialize.
5302 * @param pszFilename The new filename.
5303 */
5304static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
5305{
5306 int rc = VINF_SUCCESS;
5307
5308 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
5309
5310 /*
5311 * Allocate an array to store both old and new names of renamed files
5312 * in case we have to roll back the changes. Arrays are initialized
5313 * with zeros. We actually save stuff when and if we change it.
5314 */
5315 pRenameState->cExtents = pImage->cExtents;
5316 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char*));
5317 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char*));
5318 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char*));
5319 if ( pRenameState->apszOldName
5320 && pRenameState->apszNewName
5321 && pRenameState->apszNewLines)
5322 {
5323 /* Save the descriptor size and position. */
5324 if (pImage->pDescData)
5325 {
5326 /* Separate descriptor file. */
5327 pRenameState->fEmbeddedDesc = false;
5328 }
5329 else
5330 {
5331 /* Embedded descriptor file. */
5332 pRenameState->ExtentCopy = pImage->pExtents[0];
5333 pRenameState->fEmbeddedDesc = true;
5334 }
5335
5336 /* Save the descriptor content. */
5337 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
5338 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
5339 {
5340 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
5341 if (!pRenameState->DescriptorCopy.aLines[i])
5342 {
5343 rc = VERR_NO_MEMORY;
5344 break;
5345 }
5346 }
5347
5348 if (RT_SUCCESS(rc))
5349 {
5350 /* Prepare both old and new base names used for string replacement. */
5351 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
5352 RTPathStripSuffix(pRenameState->pszNewBaseName);
5353 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
5354 RTPathStripSuffix(pRenameState->pszOldBaseName);
5355 /* Prepare both old and new full names used for string replacement. */
5356 pRenameState->pszNewFullName = RTStrDup(pszFilename);
5357 RTPathStripSuffix(pRenameState->pszNewFullName);
5358 pRenameState->pszOldFullName = RTStrDup(pImage->pszFilename);
5359 RTPathStripSuffix(pRenameState->pszOldFullName);
5360
5361 /* Save the old name for easy access to the old descriptor file. */
5362 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
5363 /* Save old image name. */
5364 pRenameState->pszOldImageName = pImage->pszFilename;
5365 }
5366 }
5367 else
5368 rc = VERR_NO_MEMORY;
5369
5370 return rc;
5371}
5372
5373/**
5374 * Destroys the given rename state, freeing all allocated memory.
5375 *
5376 * @returns nothing.
5377 * @param pRenameState The rename state to destroy.
5378 */
5379static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
5380{
5381 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
5382 if (pRenameState->DescriptorCopy.aLines[i])
5383 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
5384 if (pRenameState->apszOldName)
5385 {
5386 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5387 if (pRenameState->apszOldName[i])
5388 RTStrFree(pRenameState->apszOldName[i]);
5389 RTMemTmpFree(pRenameState->apszOldName);
5390 }
5391 if (pRenameState->apszNewName)
5392 {
5393 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5394 if (pRenameState->apszNewName[i])
5395 RTStrFree(pRenameState->apszNewName[i]);
5396 RTMemTmpFree(pRenameState->apszNewName);
5397 }
5398 if (pRenameState->apszNewLines)
5399 {
5400 for (unsigned i = 0; i < pRenameState->cExtents; i++)
5401 if (pRenameState->apszNewLines[i])
5402 RTStrFree(pRenameState->apszNewLines[i]);
5403 RTMemTmpFree(pRenameState->apszNewLines);
5404 }
5405 if (pRenameState->pszOldDescName)
5406 RTStrFree(pRenameState->pszOldDescName);
5407 if (pRenameState->pszOldBaseName)
5408 RTStrFree(pRenameState->pszOldBaseName);
5409 if (pRenameState->pszNewBaseName)
5410 RTStrFree(pRenameState->pszNewBaseName);
5411 if (pRenameState->pszOldFullName)
5412 RTStrFree(pRenameState->pszOldFullName);
5413 if (pRenameState->pszNewFullName)
5414 RTStrFree(pRenameState->pszNewFullName);
5415}
5416
5417/**
5418 * Rolls back the rename operation to the original state.
5419 *
5420 * @returns VBox status code.
5421 * @param pImage VMDK image instance.
5422 * @param pRenameState The rename state.
5423 */
5424static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
5425{
5426 int rc = VINF_SUCCESS;
5427
5428 if (!pRenameState->fImageFreed)
5429 {
5430 /*
5431 * Some extents may have been closed, close the rest. We will
5432 * re-open the whole thing later.
5433 */
5434 vmdkFreeImage(pImage, false);
5435 }
5436
5437 /* Rename files back. */
5438 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5439 {
5440 if (pRenameState->apszOldName[i])
5441 {
5442 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
5443 AssertRC(rc);
5444 }
5445 }
5446 /* Restore the old descriptor. */
5447 PVMDKFILE pFile;
5448 rc = vmdkFileOpen(pImage, &pFile, pRenameState->pszOldDescName,
5449 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
5450 false /* fCreate */));
5451 AssertRC(rc);
5452 if (pRenameState->fEmbeddedDesc)
5453 {
5454 pRenameState->ExtentCopy.pFile = pFile;
5455 pImage->pExtents = &pRenameState->ExtentCopy;
5456 }
5457 else
5458 {
5459 /* Shouldn't be null for separate descriptor.
5460 * There will be no access to the actual content.
5461 */
5462 pImage->pDescData = pRenameState->pszOldDescName;
5463 pImage->pFile = pFile;
5464 }
5465 pImage->Descriptor = pRenameState->DescriptorCopy;
5466 vmdkWriteDescriptor(pImage, NULL);
5467 vmdkFileClose(pImage, &pFile, false);
5468 /* Get rid of the stuff we implanted. */
5469 pImage->pExtents = NULL;
5470 pImage->pFile = NULL;
5471 pImage->pDescData = NULL;
5472 /* Re-open the image back. */
5473 pImage->pszFilename = pRenameState->pszOldImageName;
5474 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5475
5476 return rc;
5477}
5478
5479/**
5480 * Rename worker doing the real work.
5481 *
5482 * @returns VBox status code.
5483 * @param pImage VMDK image instance.
5484 * @param pRenameState The rename state.
5485 * @param pszFilename The new filename.
5486 */
5487static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
5488{
5489 int rc = VINF_SUCCESS;
5490 unsigned i, line;
5491
5492 /* Update the descriptor with modified extent names. */
5493 for (i = 0, line = pImage->Descriptor.uFirstExtent;
5494 i < pRenameState->cExtents;
5495 i++, line = pImage->Descriptor.aNextLines[line])
5496 {
5497 /* Update the descriptor. */
5498 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
5499 pRenameState->pszOldBaseName,
5500 pRenameState->pszNewBaseName);
5501 if (!pRenameState->apszNewLines[i])
5502 {
5503 rc = VERR_NO_MEMORY;
5504 break;
5505 }
5506 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
5507 }
5508
5509 if (RT_SUCCESS(rc))
5510 {
5511 /* Make sure the descriptor gets written back. */
5512 pImage->Descriptor.fDirty = true;
5513 /* Flush the descriptor now, in case it is embedded. */
5514 vmdkFlushImage(pImage, NULL);
5515
5516 /* Close and rename/move extents. */
5517 for (i = 0; i < pRenameState->cExtents; i++)
5518 {
5519 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5520 /* Compose new name for the extent. */
5521 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
5522 pRenameState->pszOldFullName,
5523 pRenameState->pszNewFullName);
5524 if (!pRenameState->apszNewName[i])
5525 {
5526 rc = VERR_NO_MEMORY;
5527 break;
5528 }
5529 /* Close the extent file. */
5530 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
5531 if (RT_FAILURE(rc))
5532 break;;
5533
5534 /* Rename the extent file. */
5535 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
5536 if (RT_FAILURE(rc))
5537 break;
5538 /* Remember the old name. */
5539 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
5540 }
5541
5542 if (RT_SUCCESS(rc))
5543 {
5544 /* Release all old stuff. */
5545 rc = vmdkFreeImage(pImage, false);
5546 if (RT_SUCCESS(rc))
5547 {
5548 pRenameState->fImageFreed = true;
5549
5550 /* Last elements of new/old name arrays are intended for
5551 * storing descriptor's names.
5552 */
5553 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
5554 /* Rename the descriptor file if it's separate. */
5555 if (!pRenameState->fEmbeddedDesc)
5556 {
5557 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
5558 if (RT_SUCCESS(rc))
5559 {
5560 /* Save old name only if we may need to change it back. */
5561 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
5562 }
5563 }
5564
5565 /* Update pImage with the new information. */
5566 pImage->pszFilename = pszFilename;
5567
5568 /* Open the new image. */
5569 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5570 }
5571 }
5572 }
5573
5574 return rc;
5575}
5576
5577/** @copydoc VDIMAGEBACKEND::pfnRename */
5578static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
5579{
5580 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
5581
5582 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5583 VMDKRENAMESTATE RenameState;
5584
5585 memset(&RenameState, 0, sizeof(RenameState));
5586
5587 /* Check arguments. */
5588 AssertReturn(( !pImage
5589 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5590 || !VALID_PTR(pszFilename)
5591 || !*pszFilename), VERR_INVALID_PARAMETER);
5592
5593 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
5594 if (RT_SUCCESS(rc))
5595 {
5596 /* --- Up to this point we have not done any damage yet. --- */
5597
5598 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
5599 /* Roll back all changes in case of failure. */
5600 if (RT_FAILURE(rc))
5601 {
5602 int rrc = vmdkRenameRollback(pImage, &RenameState);
5603 AssertRC(rrc);
5604 }
5605 }
5606
5607 vmdkRenameStateDestroy(&RenameState);
5608 LogFlowFunc(("returns %Rrc\n", rc));
5609 return rc;
5610}
5611
5612/** @copydoc VDIMAGEBACKEND::pfnClose */
5613static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
5614{
5615 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5616 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5617
5618 int rc = vmdkFreeImage(pImage, fDelete);
5619 RTMemFree(pImage);
5620
5621 LogFlowFunc(("returns %Rrc\n", rc));
5622 return rc;
5623}
5624
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    /* Offset and size must be sector (512 byte) aligned and within the disk. */
    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);

    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                uint64_t uSectorExtentAbs;

                /* Translate the extent-relative sector to the absolute file
                 * sector via the grain table; 0 means an unallocated grain. */
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain: report a free block, except for
                     * stream-optimized images opened read-only + sequential,
                     * which use the sequential stream reader. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));

                        /* Stream-optimized grains are compressed; inflate the
                         * whole grain into the per-extent cache (pvGrain) if
                         * it is not the one currently cached. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the grain cache on error. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        /* Serve the request from the decompressed grain cache. */
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents: direct read at the extent-relative offset. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage; synthesize zeros. */
                size_t cbSet;

                cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* Extent found but marked NOACCESS. */
        rc = VERR_VD_VMDK_INVALID_STATE;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5736
/** @copydoc VDIMAGEBACKEND::pfnWrite */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    /* Offset and size must be sector (512 byte) aligned. */
    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);

    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;

        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */

        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* NOTE(review): the nested clause below only rejects non-READWRITE
             * extents when the image is not stream-optimized, has no append
             * position and the extent is not READONLY — verify this compound
             * condition against the intended access policy. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        /* Look up the grain; 0 means not yet allocated. */
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* Stream-optimized images only allow appending
                             * beyond the last grain written so far. */
                            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial grain write: report a free block and tell
                                             * the caller how much to read before/after so it can
                                             * issue a full-grain write instead. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Allocated grain: plain write at the absolute offset. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Writes to zero extents are silently dropped. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }

            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5873
5874/** @copydoc VDIMAGEBACKEND::pfnFlush */
5875static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
5876{
5877 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5878
5879 return vmdkFlushImage(pImage, pIoCtx);
5880}
5881
5882/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
5883static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
5884{
5885 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5886 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5887
5888 AssertPtrReturn(pImage, 0);
5889
5890 return VMDK_IMAGE_VERSION;
5891}
5892
5893/** @copydoc VDIMAGEBACKEND::pfnGetSectorSize */
5894static DECLCALLBACK(uint32_t) vmdkGetSectorSize(void *pBackendData)
5895{
5896 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5897 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5898
5899 AssertPtrReturn(pImage, 0);
5900
5901 return 512;
5902}
5903
5904/** @copydoc VDIMAGEBACKEND::pfnGetSize */
5905static DECLCALLBACK(uint64_t) vmdkGetSize(void *pBackendData)
5906{
5907 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5908 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5909
5910 AssertPtrReturn(pImage, 0);
5911
5912 return pImage->cbSize;
5913}
5914
5915/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
5916static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
5917{
5918 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5919 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5920 uint64_t cb = 0;
5921
5922 AssertPtrReturn(pImage, 0);
5923
5924 if (pImage->pFile != NULL)
5925 {
5926 uint64_t cbFile;
5927 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
5928 if (RT_SUCCESS(rc))
5929 cb += cbFile;
5930 }
5931 for (unsigned i = 0; i < pImage->cExtents; i++)
5932 {
5933 if (pImage->pExtents[i].pFile != NULL)
5934 {
5935 uint64_t cbFile;
5936 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
5937 if (RT_SUCCESS(rc))
5938 cb += cbFile;
5939 }
5940 }
5941
5942 LogFlowFunc(("returns %lld\n", cb));
5943 return cb;
5944}
5945
5946/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
5947static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
5948{
5949 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5950 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5951 int rc = VINF_SUCCESS;
5952
5953 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
5954
5955 if (pImage->PCHSGeometry.cCylinders)
5956 *pPCHSGeometry = pImage->PCHSGeometry;
5957 else
5958 rc = VERR_VD_GEOMETRY_NOT_SET;
5959
5960 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5961 return rc;
5962}
5963
5964/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
5965static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
5966{
5967 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
5968 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5969 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5970 int rc = VINF_SUCCESS;
5971
5972 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
5973
5974 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5975 {
5976 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
5977 {
5978 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5979 if (RT_SUCCESS(rc))
5980 pImage->PCHSGeometry = *pPCHSGeometry;
5981 }
5982 else
5983 rc = VERR_NOT_SUPPORTED;
5984 }
5985 else
5986 rc = VERR_VD_IMAGE_READ_ONLY;
5987
5988 LogFlowFunc(("returns %Rrc\n", rc));
5989 return rc;
5990}
5991
5992/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
5993static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
5994{
5995 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5996 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5997 int rc = VINF_SUCCESS;
5998
5999 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6000
6001 if (pImage->LCHSGeometry.cCylinders)
6002 *pLCHSGeometry = pImage->LCHSGeometry;
6003 else
6004 rc = VERR_VD_GEOMETRY_NOT_SET;
6005
6006 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6007 return rc;
6008}
6009
6010/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
6011static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
6012{
6013 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
6014 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6015 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6016 int rc = VINF_SUCCESS;
6017
6018 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6019
6020 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6021 {
6022 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6023 {
6024 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6025 if (RT_SUCCESS(rc))
6026 pImage->LCHSGeometry = *pLCHSGeometry;
6027 }
6028 else
6029 rc = VERR_NOT_SUPPORTED;
6030 }
6031 else
6032 rc = VERR_VD_IMAGE_READ_ONLY;
6033
6034 LogFlowFunc(("returns %Rrc\n", rc));
6035 return rc;
6036}
6037
6038/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
6039static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
6040{
6041 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6042 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6043
6044 AssertPtrReturn(pImage, 0);
6045
6046 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
6047 return pImage->uImageFlags;
6048}
6049
6050/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
6051static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
6052{
6053 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6054 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6055
6056 AssertPtrReturn(pImage, 0);
6057
6058 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
6059 return pImage->uOpenFlags;
6060}
6061
6062/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
6063static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6064{
6065 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
6066 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6067 int rc;
6068
6069 /* Image must be opened and the new flags must be valid. */
6070 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
6071 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
6072 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
6073 rc = VERR_INVALID_PARAMETER;
6074 else
6075 {
6076 /* StreamOptimized images need special treatment: reopen is prohibited. */
6077 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6078 {
6079 if (pImage->uOpenFlags == uOpenFlags)
6080 rc = VINF_SUCCESS;
6081 else
6082 rc = VERR_INVALID_PARAMETER;
6083 }
6084 else
6085 {
6086 /* Implement this operation via reopening the image. */
6087 vmdkFreeImage(pImage, false);
6088 rc = vmdkOpenImage(pImage, uOpenFlags);
6089 }
6090 }
6091
6092 LogFlowFunc(("returns %Rrc\n", rc));
6093 return rc;
6094}
6095
6096/** @copydoc VDIMAGEBACKEND::pfnGetComment */
6097static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
6098{
6099 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
6100 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6101
6102 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6103
6104 char *pszCommentEncoded = NULL;
6105 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
6106 "ddb.comment", &pszCommentEncoded);
6107 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
6108 {
6109 pszCommentEncoded = NULL;
6110 rc = VINF_SUCCESS;
6111 }
6112
6113 if (RT_SUCCESS(rc))
6114 {
6115 if (pszComment && pszCommentEncoded)
6116 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
6117 else if (pszComment)
6118 *pszComment = '\0';
6119
6120 if (pszCommentEncoded)
6121 RTMemTmpFree(pszCommentEncoded);
6122 }
6123
6124 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
6125 return rc;
6126}
6127
6128/** @copydoc VDIMAGEBACKEND::pfnSetComment */
6129static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
6130{
6131 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6132 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6133 int rc;
6134
6135 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6136
6137 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6138 {
6139 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6140 rc = vmdkSetImageComment(pImage, pszComment);
6141 else
6142 rc = VERR_NOT_SUPPORTED;
6143 }
6144 else
6145 rc = VERR_VD_IMAGE_READ_ONLY;
6146
6147 LogFlowFunc(("returns %Rrc\n", rc));
6148 return rc;
6149}
6150
6151/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
6152static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6153{
6154 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6155 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6156
6157 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6158
6159 *pUuid = pImage->ImageUuid;
6160
6161 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6162 return VINF_SUCCESS;
6163}
6164
6165/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
6166static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6167{
6168 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6169 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6170 int rc = VINF_SUCCESS;
6171
6172 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6173
6174 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6175 {
6176 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6177 {
6178 pImage->ImageUuid = *pUuid;
6179 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6180 VMDK_DDB_IMAGE_UUID, pUuid);
6181 if (RT_FAILURE(rc))
6182 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
6183 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6184 }
6185 else
6186 rc = VERR_NOT_SUPPORTED;
6187 }
6188 else
6189 rc = VERR_VD_IMAGE_READ_ONLY;
6190
6191 LogFlowFunc(("returns %Rrc\n", rc));
6192 return rc;
6193}
6194
6195/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
6196static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6197{
6198 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6199 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6200
6201 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6202
6203 *pUuid = pImage->ModificationUuid;
6204
6205 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6206 return VINF_SUCCESS;
6207}
6208
6209/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
6210static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6211{
6212 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6213 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6214 int rc = VINF_SUCCESS;
6215
6216 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6217
6218 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6219 {
6220 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6221 {
6222 /* Only touch the modification uuid if it changed. */
6223 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6224 {
6225 pImage->ModificationUuid = *pUuid;
6226 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6227 VMDK_DDB_MODIFICATION_UUID, pUuid);
6228 if (RT_FAILURE(rc))
6229 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6230 }
6231 }
6232 else
6233 rc = VERR_NOT_SUPPORTED;
6234 }
6235 else
6236 rc = VERR_VD_IMAGE_READ_ONLY;
6237
6238 LogFlowFunc(("returns %Rrc\n", rc));
6239 return rc;
6240}
6241
6242/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
6243static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6244{
6245 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6246 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6247
6248 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6249
6250 *pUuid = pImage->ParentUuid;
6251
6252 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6253 return VINF_SUCCESS;
6254}
6255
6256/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
6257static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6258{
6259 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6260 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6261 int rc = VINF_SUCCESS;
6262
6263 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6264
6265 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6266 {
6267 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6268 {
6269 pImage->ParentUuid = *pUuid;
6270 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6271 VMDK_DDB_PARENT_UUID, pUuid);
6272 if (RT_FAILURE(rc))
6273 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
6274 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6275 }
6276 else
6277 rc = VERR_NOT_SUPPORTED;
6278 }
6279 else
6280 rc = VERR_VD_IMAGE_READ_ONLY;
6281
6282 LogFlowFunc(("returns %Rrc\n", rc));
6283 return rc;
6284}
6285
6286/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
6287static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6288{
6289 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6290 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6291
6292 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6293
6294 *pUuid = pImage->ParentModificationUuid;
6295
6296 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6297 return VINF_SUCCESS;
6298}
6299
6300/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
6301static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6302{
6303 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6304 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6305 int rc = VINF_SUCCESS;
6306
6307 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6308
6309 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6310 {
6311 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6312 {
6313 pImage->ParentModificationUuid = *pUuid;
6314 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6315 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6316 if (RT_FAILURE(rc))
6317 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6318 }
6319 else
6320 rc = VERR_NOT_SUPPORTED;
6321 }
6322 else
6323 rc = VERR_VD_IMAGE_READ_ONLY;
6324
6325 LogFlowFunc(("returns %Rrc\n", rc));
6326 return rc;
6327}
6328
6329/** @copydoc VDIMAGEBACKEND::pfnDump */
6330static DECLCALLBACK(void) vmdkDump(void *pBackendData)
6331{
6332 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6333
6334 AssertPtrReturnVoid(pImage);
6335 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
6336 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
6337 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
6338 VMDK_BYTE2SECTOR(pImage->cbSize));
6339 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
6340 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
6341 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
6342 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
6343}
6344
6345
6346
/**
 * VMDK image backend descriptor registered with the VD core.
 *
 * The entries are positional (VDIMAGEBACKEND layout); NULL marks operations
 * this backend does not implement (discard, compact, resize, repair,
 * metadata traversal, timestamps and parent filename handling).
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSectorSize */
    vmdkGetSectorSize,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette