VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 94959

Last change on this file since 94959 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 339.7 KB
1/* $Id: VMDK.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5/*
6 * Copyright (C) 2006-2022 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16
17
18/*********************************************************************************************************************************
19* Header Files *
20*********************************************************************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/log.h> /* before VBox/vd-ifs.h */
23#include <VBox/vd-plugin.h>
24#include <VBox/err.h>
25#include <iprt/assert.h>
26#include <iprt/alloc.h>
27#include <iprt/base64.h>
28#include <iprt/ctype.h>
29#include <iprt/crc.h>
30#include <iprt/dvm.h>
31#include <iprt/uuid.h>
32#include <iprt/path.h>
33#include <iprt/rand.h>
34#include <iprt/string.h>
35#include <iprt/sort.h>
36#include <iprt/zip.h>
37#include <iprt/asm.h>
38#ifdef RT_OS_WINDOWS
39# include <iprt/utf16.h>
40# include <iprt/uni.h>
41# include <iprt/uni.h>
42# include <iprt/nt/nt-and-windows.h>
43# include <winioctl.h>
44#endif
45#ifdef RT_OS_LINUX
46# include <errno.h>
47# include <sys/stat.h>
48# include <iprt/dir.h>
49# include <iprt/symlink.h>
50# include <iprt/linux/sysfs.h>
51#endif
52#ifdef RT_OS_FREEBSD
53#include <libgeom.h>
54#include <sys/stat.h>
55#include <stdlib.h>
56#endif
57#ifdef RT_OS_SOLARIS
58#include <sys/dkio.h>
59#include <sys/vtoc.h>
60#include <sys/efi_partition.h>
61#include <unistd.h>
62#include <errno.h>
63#endif
64#ifdef RT_OS_DARWIN
65# include <sys/stat.h>
66# include <sys/disk.h>
67# include <errno.h>
68/* The following structure and IOCTLs are defined in xnu bsd/sys/disk.h but
69 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
70 While we could try include the header from the Kernel.framework, it's a lot
71 easier to just add the structure and 4 defines here. */
72typedef struct
73{
74 uint64_t offset;
75 uint64_t length;
76 uint8_t reserved0128[12];
77 dev_t dev;
78} dk_physical_extent_t;
79# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
80# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
81# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
82# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
83#endif /* RT_OS_DARWIN */
84#include "VDBackends.h"
85
86
87/*********************************************************************************************************************************
88* Constants And Macros, Structures and Typedefs *
89*********************************************************************************************************************************/
90/** Maximum encoded string size (including NUL) we allow for VMDK images.
91 * Deliberately not set high to avoid running out of descriptor space. */
92#define VMDK_ENCODED_COMMENT_MAX 1024
93/** VMDK descriptor DDB entry for PCHS cylinders. */
94#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
95/** VMDK descriptor DDB entry for PCHS heads. */
96#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
97/** VMDK descriptor DDB entry for PCHS sectors. */
98#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
99/** VMDK descriptor DDB entry for LCHS cylinders. */
100#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
101/** VMDK descriptor DDB entry for LCHS heads. */
102#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
103/** VMDK descriptor DDB entry for LCHS sectors. */
104#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
105/** VMDK descriptor DDB entry for image UUID. */
106#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
107/** VMDK descriptor DDB entry for image modification UUID. */
108#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
109/** VMDK descriptor DDB entry for parent image UUID. */
110#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
111/** VMDK descriptor DDB entry for parent image modification UUID. */
112#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
113/** No compression for streamOptimized files. */
114#define VMDK_COMPRESSION_NONE 0
115/** Deflate compression for streamOptimized files. */
116#define VMDK_COMPRESSION_DEFLATE 1
117/** Marker that the actual GD value is stored in the footer. */
118#define VMDK_GD_AT_END 0xffffffffffffffffULL
119/** Marker for end-of-stream in streamOptimized images. */
120#define VMDK_MARKER_EOS 0
121/** Marker for grain table block in streamOptimized images. */
122#define VMDK_MARKER_GT 1
123/** Marker for grain directory block in streamOptimized images. */
124#define VMDK_MARKER_GD 2
125/** Marker for footer in streamOptimized images. */
126#define VMDK_MARKER_FOOTER 3
127/** Marker for unknown purpose in streamOptimized images.
128 * Shows up in very recent images created by vSphere, but only sporadically.
129 * They "forgot" to document that one in the VMDK specification. */
130#define VMDK_MARKER_UNSPECIFIED 4
131/** Dummy marker for "don't check the marker value". */
132#define VMDK_MARKER_IGNORE 0xffffffffU
133/**
134 * Magic number for hosted images created by VMware Workstation 4, VMware
135 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
136 */
137#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
138/**
139 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
140 * this header is also used for monolithic flat images.
141 */
142#pragma pack(1)
143typedef struct SparseExtentHeader
144{
145 uint32_t magicNumber;
146 uint32_t version;
147 uint32_t flags;
148 uint64_t capacity;
149 uint64_t grainSize;
150 uint64_t descriptorOffset;
151 uint64_t descriptorSize;
152 uint32_t numGTEsPerGT;
153 uint64_t rgdOffset;
154 uint64_t gdOffset;
155 uint64_t overHead;
156 bool uncleanShutdown;
157 char singleEndLineChar;
158 char nonEndLineChar;
159 char doubleEndLineChar1;
160 char doubleEndLineChar2;
161 uint16_t compressAlgorithm;
162 uint8_t pad[433];
163} SparseExtentHeader;
164#pragma pack()
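/* The packed header above is laid out to occupy exactly one 512-byte sector
   (79 bytes of fields plus 433 bytes of padding). A compile-time check of that
   assumption, as an illustrative sanity check using AssertCompileSize from
   iprt/assert.h (already included above): */
AssertCompileSize(SparseExtentHeader, 512);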
165/** The maximum allowed descriptor size in the extent header in sectors. */
166#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
167/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
168 * divisible by the default grain size (64K) */
169#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
170/** VMDK streamOptimized file format marker. The type field may or may not
171 * be actually valid, but there's always data to read there. */
172#pragma pack(1)
173typedef struct VMDKMARKER
174{
175 uint64_t uSector;
176 uint32_t cbSize;
177 uint32_t uType;
178} VMDKMARKER, *PVMDKMARKER;
179#pragma pack()
180/** Convert sector number/size to byte offset/size. */
181#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
182/** Convert byte offset/size to sector number/size. */
183#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
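/* VMDK uses 512-byte sectors throughout, hence the shift by 9 above. A few
   compile-time examples of the conversion (illustrative sanity checks only).
   Note that VMDK_BYTE2SECTOR truncates byte counts which are not sector
   aligned, which is why sizes are rounded up with RT_ALIGN_Z(..., 512) where
   necessary before converting. */
AssertCompile(VMDK_SECTOR2BYTE(1) == 512);
AssertCompile(VMDK_SECTOR2BYTE(128) == 65536);
AssertCompile(VMDK_BYTE2SECTOR(4096) == 8);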
184/**
185 * VMDK extent type.
186 */
187typedef enum VMDKETYPE
188{
189 /** Hosted sparse extent. */
190 VMDKETYPE_HOSTED_SPARSE = 1,
191 /** Flat extent. */
192 VMDKETYPE_FLAT,
193 /** Zero extent. */
194 VMDKETYPE_ZERO,
195 /** VMFS extent, used by ESX. */
196 VMDKETYPE_VMFS
197} VMDKETYPE, *PVMDKETYPE;
198/**
199 * VMDK access type for an extent.
200 */
201typedef enum VMDKACCESS
202{
203 /** No access allowed. */
204 VMDKACCESS_NOACCESS = 0,
205 /** Read-only access. */
206 VMDKACCESS_READONLY,
207 /** Read-write access. */
208 VMDKACCESS_READWRITE
209} VMDKACCESS, *PVMDKACCESS;
210/** Forward declaration for PVMDKIMAGE. */
211typedef struct VMDKIMAGE *PVMDKIMAGE;
212/**
213 * Extent file entry. Used to ensure a particular file is only opened once.
214 */
215typedef struct VMDKFILE
216{
217 /** Pointer to file path. Local copy. */
218 const char *pszFilename;
219 /** Pointer to base name. Local copy. */
220 const char *pszBasename;
221 /** File open flags for consistency checking. */
222 unsigned fOpen;
223 /** Handle for sync/async file abstraction.*/
224 PVDIOSTORAGE pStorage;
225 /** Reference counter. */
226 unsigned uReferences;
227 /** Flag whether the file should be deleted on last close. */
228 bool fDelete;
229 /** Pointer to the image we belong to (for debugging purposes). */
230 PVMDKIMAGE pImage;
231 /** Pointer to next file descriptor. */
232 struct VMDKFILE *pNext;
233 /** Pointer to the previous file descriptor. */
234 struct VMDKFILE *pPrev;
235} VMDKFILE, *PVMDKFILE;
236/**
237 * VMDK extent data structure.
238 */
239typedef struct VMDKEXTENT
240{
241 /** File handle. */
242 PVMDKFILE pFile;
243 /** Base name of the image extent. */
244 const char *pszBasename;
245 /** Full name of the image extent. */
246 const char *pszFullname;
247 /** Number of sectors in this extent. */
248 uint64_t cSectors;
249 /** Number of sectors per block (grain in VMDK speak). */
250 uint64_t cSectorsPerGrain;
251 /** Starting sector number of descriptor. */
252 uint64_t uDescriptorSector;
253 /** Size of descriptor in sectors. */
254 uint64_t cDescriptorSectors;
255 /** Starting sector number of grain directory. */
256 uint64_t uSectorGD;
257 /** Starting sector number of redundant grain directory. */
258 uint64_t uSectorRGD;
259 /** Total number of metadata sectors. */
260 uint64_t cOverheadSectors;
261 /** Nominal size (i.e. as described by the descriptor) of this extent. */
262 uint64_t cNominalSectors;
263 /** Sector offset (i.e. as described by the descriptor) of this extent. */
264 uint64_t uSectorOffset;
265 /** Number of entries in a grain table. */
266 uint32_t cGTEntries;
267 /** Number of sectors reachable via a grain directory entry. */
268 uint32_t cSectorsPerGDE;
269 /** Number of entries in the grain directory. */
270 uint32_t cGDEntries;
271 /** Pointer to the next free sector. Legacy information. Do not use. */
272 uint32_t uFreeSector;
273 /** Number of this extent in the list of images. */
274 uint32_t uExtent;
275 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
276 char *pDescData;
277 /** Pointer to the grain directory. */
278 uint32_t *pGD;
279 /** Pointer to the redundant grain directory. */
280 uint32_t *pRGD;
281 /** VMDK version of this extent. 1=1.0/1.1 */
282 uint32_t uVersion;
283 /** Type of this extent. */
284 VMDKETYPE enmType;
285 /** Access to this extent. */
286 VMDKACCESS enmAccess;
287 /** Flag whether this extent is marked as unclean. */
288 bool fUncleanShutdown;
289 /** Flag whether the metadata in the extent header needs to be updated. */
290 bool fMetaDirty;
291 /** Flag whether there is a footer in this extent. */
292 bool fFooter;
293 /** Compression type for this extent. */
294 uint16_t uCompression;
295 /** Append position for writing new grain. Only for sparse extents. */
296 uint64_t uAppendPosition;
297 /** Last grain which was accessed. Only for streamOptimized extents. */
298 uint32_t uLastGrainAccess;
299 /** Starting sector corresponding to the grain buffer. */
300 uint32_t uGrainSectorAbs;
301 /** Grain number corresponding to the grain buffer. */
302 uint32_t uGrain;
303 /** Actual size of the compressed data, only valid for reading. */
304 uint32_t cbGrainStreamRead;
305 /** Size of compressed grain buffer for streamOptimized extents. */
306 size_t cbCompGrain;
307 /** Compressed grain buffer for streamOptimized extents, with marker. */
308 void *pvCompGrain;
309 /** Decompressed grain buffer for streamOptimized extents. */
310 void *pvGrain;
311 /** Reference to the image in which this extent is used. Do not use this
312 * on a regular basis to avoid passing pImage references to functions
313 * explicitly. */
314 struct VMDKIMAGE *pImage;
315} VMDKEXTENT, *PVMDKEXTENT;
316/**
317 * Grain table cache size. Allocated per image.
318 */
319#define VMDK_GT_CACHE_SIZE 256
320/**
321 * Grain table block size. Smaller than an actual grain table block to allow
322 * more grain table blocks to be cached without having to allocate excessive
323 * amounts of memory for the cache.
324 */
325#define VMDK_GT_CACHELINE_SIZE 128
326/**
327 * Maximum number of lines in a descriptor file. Not worth the effort of
328 * making it variable. Descriptor files are generally very short (~20 lines),
329 * with the exception of sparse files split into 2G chunks, which at the
330 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
331 */
332#define VMDK_DESCRIPTOR_LINES_MAX 1100U
333/**
334 * Parsed descriptor information. Allows easy access and update of the
335 * descriptor (whether separate file or not). Free form text files suck.
336 */
337typedef struct VMDKDESCRIPTOR
338{
339 /** Line number of first entry of the disk descriptor. */
340 unsigned uFirstDesc;
341 /** Line number of first entry in the extent description. */
342 unsigned uFirstExtent;
343 /** Line number of first disk database entry. */
344 unsigned uFirstDDB;
345 /** Total number of lines. */
346 unsigned cLines;
347 /** Total amount of memory available for the descriptor. */
348 size_t cbDescAlloc;
349 /** Set if descriptor has been changed and not yet written to disk. */
350 bool fDirty;
351 /** Array of pointers to the data in the descriptor. */
352 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
353 /** Array of line indices pointing to the next non-comment line. */
354 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
355} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
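/* For orientation, a minimal monolithicSparse descriptor as it would be parsed
 * into the structure above (a sketch with made-up example values, not taken
 * from a real image):
 *
 *   # Disk DescriptorFile
 *   version=1
 *   CID=fffffffe
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   # Extent description
 *   RW 2097152 SPARSE "example.vmdk"
 *
 *   # The Disk Data Base
 *   ddb.geometry.cylinders = "2080"
 *   ddb.geometry.heads = "16"
 *   ddb.geometry.sectors = "63"
 *
 * uFirstDesc, uFirstExtent and uFirstDDB index the first line of the three
 * sections respectively, and aNextLines links each entry to the following
 * non-comment line so lookups can skip over comments. */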
356/**
357 * Cache entry for translating extent/sector to a sector number in that
358 * extent.
359 */
360typedef struct VMDKGTCACHEENTRY
361{
362 /** Extent number for which this entry is valid. */
363 uint32_t uExtent;
364 /** GT data block number. */
365 uint64_t uGTBlock;
366 /** Data part of the cache entry. */
367 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
368} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
369/**
370 * Cache data structure for blocks of grain table entries. For now this is a
371 * fixed size direct mapping cache, but this should be adapted to the size of
372 * the sparse image and maybe converted to a set-associative cache. The
373 * implementation below implements a write-through cache with write allocate.
374 */
375typedef struct VMDKGTCACHE
376{
377 /** Cache entries. */
378 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
379 /** Number of cache entries (currently unused). */
380 unsigned cEntries;
381} VMDKGTCACHE, *PVMDKGTCACHE;
382/**
383 * Complete VMDK image data structure. Mainly a collection of extents and a few
384 * extra global data fields.
385 */
386typedef struct VMDKIMAGE
387{
388 /** Image name. */
389 const char *pszFilename;
390 /** Descriptor file if applicable. */
391 PVMDKFILE pFile;
392 /** Pointer to the per-disk VD interface list. */
393 PVDINTERFACE pVDIfsDisk;
394 /** Pointer to the per-image VD interface list. */
395 PVDINTERFACE pVDIfsImage;
396 /** Error interface. */
397 PVDINTERFACEERROR pIfError;
398 /** I/O interface. */
399 PVDINTERFACEIOINT pIfIo;
400 /** Pointer to the image extents. */
401 PVMDKEXTENT pExtents;
402 /** Number of image extents. */
403 unsigned cExtents;
404 /** Pointer to the files list, for opening a file referenced multiple
405 * times only once (happens mainly with raw partition access). */
406 PVMDKFILE pFiles;
407 /**
408 * Pointer to an array of segment entries for async I/O.
409 * This is an optimization because the number of tasks to submit is not known
410 * and allocating/freeing an array in the read/write functions every time
411 * is too expensive.
412 */
413 PPDMDATASEG paSegments;
414 /** Entries available in the segments array. */
415 unsigned cSegments;
416 /** Open flags passed by VBoxHD layer. */
417 unsigned uOpenFlags;
418 /** Image flags defined during creation or determined during open. */
419 unsigned uImageFlags;
420 /** Total size of the image. */
421 uint64_t cbSize;
422 /** Physical geometry of this image. */
423 VDGEOMETRY PCHSGeometry;
424 /** Logical geometry of this image. */
425 VDGEOMETRY LCHSGeometry;
426 /** Image UUID. */
427 RTUUID ImageUuid;
428 /** Image modification UUID. */
429 RTUUID ModificationUuid;
430 /** Parent image UUID. */
431 RTUUID ParentUuid;
432 /** Parent image modification UUID. */
433 RTUUID ParentModificationUuid;
434 /** Pointer to grain table cache, if this image contains sparse extents. */
435 PVMDKGTCACHE pGTCache;
436 /** Pointer to the descriptor (NULL if no separate descriptor file). */
437 char *pDescData;
438 /** Allocation size of the descriptor file. */
439 size_t cbDescAlloc;
440 /** Parsed descriptor file content. */
441 VMDKDESCRIPTOR Descriptor;
442 /** The static region list. */
443 VDREGIONLIST RegionList;
444} VMDKIMAGE;
445/** State for the input/output callout of the inflate reader/deflate writer. */
446typedef struct VMDKCOMPRESSIO
447{
448 /* Image this operation relates to. */
449 PVMDKIMAGE pImage;
450 /* Current read position. */
451 ssize_t iOffset;
452 /* Size of the compressed grain buffer (available data). */
453 size_t cbCompGrain;
454 /* Pointer to the compressed grain buffer. */
455 void *pvCompGrain;
456} VMDKCOMPRESSIO;
457/** Tracks async grain allocation. */
458typedef struct VMDKGRAINALLOCASYNC
459{
460 /** Flag whether the allocation failed. */
461 bool fIoErr;
462 /** Current number of transfers pending.
463 * If it reaches 0 and there is an error, the old state is restored. */
464 unsigned cIoXfersPending;
465 /** Sector number */
466 uint64_t uSector;
467 /** Flag whether the grain table needs to be updated. */
468 bool fGTUpdateNeeded;
469 /** Extent in which the allocation happens. */
470 PVMDKEXTENT pExtent;
471 /** Position of the new grain, required for the grain table update. */
472 uint64_t uGrainOffset;
473 /** Grain table sector. */
474 uint64_t uGTSector;
475 /** Backup grain table sector. */
476 uint64_t uRGTSector;
477} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
478/**
479 * State information for vmdkRename() and helpers.
480 */
481typedef struct VMDKRENAMESTATE
482{
483 /** Array of old filenames. */
484 char **apszOldName;
485 /** Array of new filenames. */
486 char **apszNewName;
487 /** Array of new lines in the extent descriptor. */
488 char **apszNewLines;
489 /** Name of the old descriptor file if not a sparse image. */
490 char *pszOldDescName;
491 /** Flag whether we called vmdkFreeImage(). */
492 bool fImageFreed;
493 /** Flag whether the descriptor is embedded in the image (sparse) or
494 * in a separate file. */
495 bool fEmbeddedDesc;
496 /** Number of extents in the image. */
497 unsigned cExtents;
498 /** New base filename. */
499 char *pszNewBaseName;
500 /** The old base filename. */
501 char *pszOldBaseName;
502 /** New full filename. */
503 char *pszNewFullName;
504 /** Old full filename. */
505 char *pszOldFullName;
506 /** The old image name. */
507 const char *pszOldImageName;
508 /** Copy of the original VMDK descriptor. */
509 VMDKDESCRIPTOR DescriptorCopy;
510 /** Copy of the extent state for sparse images. */
511 VMDKEXTENT ExtentCopy;
512} VMDKRENAMESTATE;
513/** Pointer to a VMDK rename state. */
514typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
515
516
517/*********************************************************************************************************************************
518* Static Variables *
519*********************************************************************************************************************************/
520/** NULL-terminated array of supported file extensions. */
521static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
522{
523 {"vmdk", VDTYPE_HDD},
524 {NULL, VDTYPE_INVALID}
525};
526/** NULL-terminated array of configuration options. */
527static const VDCONFIGINFO s_aVmdkConfigInfo[] =
528{
529 /* Options for VMDK raw disks */
530 { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
531 { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
532 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
533 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
534 /* End of options list */
535 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
536};
537
538
539/*********************************************************************************************************************************
540* Internal Functions *
541*********************************************************************************************************************************/
542static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
543static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
544 bool fDelete);
545static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
546static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
547static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
548static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
549static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
550 void *pvUser, int rcReq);
551/**
552 * Internal: open a file (using a file descriptor cache to ensure each file
553 * is only opened once - anything else can cause locking problems).
554 */
555static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
556 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
557{
558 int rc = VINF_SUCCESS;
559 PVMDKFILE pVmdkFile;
560 for (pVmdkFile = pImage->pFiles;
561 pVmdkFile != NULL;
562 pVmdkFile = pVmdkFile->pNext)
563 {
564 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
565 {
566 Assert(fOpen == pVmdkFile->fOpen);
567 pVmdkFile->uReferences++;
568 *ppVmdkFile = pVmdkFile;
569 return rc;
570 }
571 }
572 /* If we get here, there's no matching entry in the cache. */
573 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
574 if (!pVmdkFile)
575 {
576 *ppVmdkFile = NULL;
577 return VERR_NO_MEMORY;
578 }
579 pVmdkFile->pszFilename = RTStrDup(pszFilename);
580 if (!pVmdkFile->pszFilename)
581 {
582 RTMemFree(pVmdkFile);
583 *ppVmdkFile = NULL;
584 return VERR_NO_MEMORY;
585 }
586 if (pszBasename)
587 {
588 pVmdkFile->pszBasename = RTStrDup(pszBasename);
589 if (!pVmdkFile->pszBasename)
590 {
591 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
592 RTMemFree(pVmdkFile);
593 *ppVmdkFile = NULL;
594 return VERR_NO_MEMORY;
595 }
596 }
597 pVmdkFile->fOpen = fOpen;
598 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
599 &pVmdkFile->pStorage);
600 if (RT_SUCCESS(rc))
601 {
602 pVmdkFile->uReferences = 1;
603 pVmdkFile->pImage = pImage;
604 pVmdkFile->pNext = pImage->pFiles;
605 if (pImage->pFiles)
606 pImage->pFiles->pPrev = pVmdkFile;
607 pImage->pFiles = pVmdkFile;
608 *ppVmdkFile = pVmdkFile;
609 }
610 else
611 {
612 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
613 RTMemFree(pVmdkFile);
614 *ppVmdkFile = NULL;
615 }
616 return rc;
617}
618/**
619 * Internal: close a file, updating the file descriptor cache.
620 */
621static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
622{
623 int rc = VINF_SUCCESS;
624 PVMDKFILE pVmdkFile = *ppVmdkFile;
625 AssertPtr(pVmdkFile);
626 pVmdkFile->fDelete |= fDelete;
627 Assert(pVmdkFile->uReferences);
628 pVmdkFile->uReferences--;
629 if (pVmdkFile->uReferences == 0)
630 {
631 PVMDKFILE pPrev;
632 PVMDKFILE pNext;
633 /* Unchain the element from the list. */
634 pPrev = pVmdkFile->pPrev;
635 pNext = pVmdkFile->pNext;
636 if (pNext)
637 pNext->pPrev = pPrev;
638 if (pPrev)
639 pPrev->pNext = pNext;
640 else
641 pImage->pFiles = pNext;
642 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
643 bool fFileDel = pVmdkFile->fDelete;
644 if ( pVmdkFile->pszBasename
645 && fFileDel)
646 {
647 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
648 if ( RTPathHasPath(pVmdkFile->pszBasename)
649 || !pszSuffix
650 || ( strcmp(pszSuffix, ".vmdk")
651 && strcmp(pszSuffix, ".bin")
652 && strcmp(pszSuffix, ".img")))
653 fFileDel = false;
654 }
655 if (fFileDel)
656 {
657 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
658 if (RT_SUCCESS(rc))
659 rc = rc2;
660 }
661 else if (pVmdkFile->fDelete)
662 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
663 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
664 if (pVmdkFile->pszBasename)
665 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
666 RTMemFree(pVmdkFile);
667 }
668 *ppVmdkFile = NULL;
669 return rc;
670}
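/* vmdkFileOpen/vmdkFileClose form a reference-counted pair: opening the same
 * path twice returns the cached VMDKFILE with uReferences bumped, and the
 * underlying storage handle is only closed (and the file possibly deleted)
 * once the last reference is dropped. A minimal usage sketch (local variable
 * names are only for illustration):
 *
 *   PVMDKFILE pFile = NULL;
 *   rc = vmdkFileOpen(pImage, &pFile, NULL, pszFullname, fOpen);
 *   if (RT_SUCCESS(rc))
 *   {
 *       // ... use pFile->pStorage through the pIfIo interface ...
 *       rc = vmdkFileClose(pImage, &pFile, false);  // false: don't delete
 *   }
 */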
671/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
672#ifndef VMDK_USE_BLOCK_DECOMP_API
673static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
674{
675 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
676 size_t cbInjected = 0;
677 Assert(cbBuf);
678 if (pInflateState->iOffset < 0)
679 {
680 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
681 pvBuf = (uint8_t *)pvBuf + 1;
682 cbBuf--;
683 cbInjected = 1;
684 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
685 }
686 if (!cbBuf)
687 {
688 if (pcbBuf)
689 *pcbBuf = cbInjected;
690 return VINF_SUCCESS;
691 }
692 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
693 memcpy(pvBuf,
694 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
695 cbBuf);
696 pInflateState->iOffset += cbBuf;
697 Assert(pcbBuf);
698 *pcbBuf = cbBuf + cbInjected;
699 return VINF_SUCCESS;
700}
701#endif
702/**
703 * Internal: read from a file and inflate the compressed data,
704 * distinguishing between async and normal operation
705 */
706DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
707 uint64_t uOffset, void *pvBuf,
708 size_t cbToRead, const void *pcvMarker,
709 uint64_t *puLBA, uint32_t *pcbMarkerData)
710{
711 int rc;
712#ifndef VMDK_USE_BLOCK_DECOMP_API
713 PRTZIPDECOMP pZip = NULL;
714#endif
715 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
716 size_t cbCompSize, cbActuallyRead;
717 if (!pcvMarker)
718 {
719 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
720 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
721 if (RT_FAILURE(rc))
722 return rc;
723 }
724 else
725 {
726 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
727 /* pcvMarker endianness has already been partially transformed, fix it */
728 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
729 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
730 }
731 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
732 if (cbCompSize == 0)
733 {
734 AssertMsgFailed(("VMDK: corrupted marker\n"));
735 return VERR_VD_VMDK_INVALID_FORMAT;
736 }
737 /* Sanity check - the expansion ratio should be much less than 2. */
738 Assert(cbCompSize < 2 * cbToRead);
739 if (cbCompSize >= 2 * cbToRead)
740 return VERR_VD_VMDK_INVALID_FORMAT;
741 /* Compressed grain marker. Data follows immediately. */
742 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
743 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
744 (uint8_t *)pExtent->pvCompGrain
745 + RT_UOFFSETOF(VMDKMARKER, uType),
746 RT_ALIGN_Z( cbCompSize
747 + RT_UOFFSETOF(VMDKMARKER, uType),
748 512)
749 - RT_UOFFSETOF(VMDKMARKER, uType));
750 if (puLBA)
751 *puLBA = RT_LE2H_U64(pMarker->uSector);
752 if (pcbMarkerData)
753 *pcbMarkerData = RT_ALIGN( cbCompSize
754 + RT_UOFFSETOF(VMDKMARKER, uType),
755 512);
756#ifdef VMDK_USE_BLOCK_DECOMP_API
757 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
758 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
759 pvBuf, cbToRead, &cbActuallyRead);
760#else
761 VMDKCOMPRESSIO InflateState;
762 InflateState.pImage = pImage;
763 InflateState.iOffset = -1;
764 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
765 InflateState.pvCompGrain = pExtent->pvCompGrain;
766 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
767 if (RT_FAILURE(rc))
768 return rc;
769 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
770 RTZipDecompDestroy(pZip);
771#endif /* !VMDK_USE_BLOCK_DECOMP_API */
772 if (RT_FAILURE(rc))
773 {
774 if (rc == VERR_ZIP_CORRUPTED)
775 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
776 return rc;
777 }
778 if (cbActuallyRead != cbToRead)
779 rc = VERR_VD_VMDK_INVALID_FORMAT;
780 return rc;
781}
782static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
783{
784 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
785 Assert(cbBuf);
786 if (pDeflateState->iOffset < 0)
787 {
788 pvBuf = (const uint8_t *)pvBuf + 1;
789 cbBuf--;
790 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
791 }
792 if (!cbBuf)
793 return VINF_SUCCESS;
794 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
795 return VERR_BUFFER_OVERFLOW;
796 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
797 pvBuf, cbBuf);
798 pDeflateState->iOffset += cbBuf;
799 return VINF_SUCCESS;
800}
801/**
802 * Internal: deflate the uncompressed data and write to a file,
803 * distinguishing between async and normal operation
804 */
805DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
806 uint64_t uOffset, const void *pvBuf,
807 size_t cbToWrite, uint64_t uLBA,
808 uint32_t *pcbMarkerData)
809{
810 int rc;
811 PRTZIPCOMP pZip = NULL;
812 VMDKCOMPRESSIO DeflateState;
813 DeflateState.pImage = pImage;
814 DeflateState.iOffset = -1;
815 DeflateState.cbCompGrain = pExtent->cbCompGrain;
816 DeflateState.pvCompGrain = pExtent->pvCompGrain;
817 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
818 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
819 if (RT_FAILURE(rc))
820 return rc;
821 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
822 if (RT_SUCCESS(rc))
823 rc = RTZipCompFinish(pZip);
824 RTZipCompDestroy(pZip);
825 if (RT_SUCCESS(rc))
826 {
827 Assert( DeflateState.iOffset > 0
828 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
829 /* pad with zeroes to get to a full sector size */
830 uint32_t uSize = DeflateState.iOffset;
831 if (uSize % 512)
832 {
833 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
834 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
835 uSizeAlign - uSize);
836 uSize = uSizeAlign;
837 }
838 if (pcbMarkerData)
839 *pcbMarkerData = uSize;
840 /* Compressed grain marker. Data follows immediately. */
841 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
842 pMarker->uSector = RT_H2LE_U64(uLBA);
843 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
844 - RT_UOFFSETOF(VMDKMARKER, uType));
845 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
846 uOffset, pMarker, uSize);
847 if (RT_FAILURE(rc))
848 return rc;
849 }
850 return rc;
851}
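/* On-disk layout of a compressed grain as read and written by the two helpers
 * above (a sketch derived from the code, offsets in bytes):
 *
 *   0:  uint64_t uSector - LBA of the grain in the virtual disk (little endian)
 *   8:  uint32_t cbSize  - size of the deflated payload (little endian)
 *   12: deflated data    - zlib stream of cbSize bytes, starting where the
 *                          marker's uType field would otherwise sit
 *   ...                  - zero padding up to the next 512-byte boundary
 *
 * This is why both helpers use RT_UOFFSETOF(VMDKMARKER, uType) as the payload
 * offset and round the total size up to full sectors. */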
852/**
853 * Internal: check if all files are closed, prevent leaking resources.
854 */
855static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
856{
857 int rc = VINF_SUCCESS, rc2;
858 PVMDKFILE pVmdkFile;
859 Assert(pImage->pFiles == NULL);
860 for (pVmdkFile = pImage->pFiles;
861 pVmdkFile != NULL;
862 pVmdkFile = pVmdkFile->pNext)
863 {
864 LogRel(("VMDK: leaking reference to file \"%s\"\n",
865 pVmdkFile->pszFilename));
866 pImage->pFiles = pVmdkFile->pNext;
867 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
868 if (RT_SUCCESS(rc))
869 rc = rc2;
870 }
871 return rc;
872}
873/**
874 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
875 * critical non-ASCII characters.
876 */
877static char *vmdkEncodeString(const char *psz)
878{
879 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
880 char *pszDst = szEnc;
881 AssertPtr(psz);
882 for (; *psz; psz = RTStrNextCp(psz))
883 {
884 char *pszDstPrev = pszDst;
885 RTUNICP Cp = RTStrGetCp(psz);
886 if (Cp == '\\')
887 {
888 pszDst = RTStrPutCp(pszDst, Cp);
889 pszDst = RTStrPutCp(pszDst, Cp);
890 }
891 else if (Cp == '\n')
892 {
893 pszDst = RTStrPutCp(pszDst, '\\');
894 pszDst = RTStrPutCp(pszDst, 'n');
895 }
896 else if (Cp == '\r')
897 {
898 pszDst = RTStrPutCp(pszDst, '\\');
899 pszDst = RTStrPutCp(pszDst, 'r');
900 }
901 else
902 pszDst = RTStrPutCp(pszDst, Cp);
903 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
904 {
905 pszDst = pszDstPrev;
906 break;
907 }
908 }
909 *pszDst = '\0';
910 return RTStrDup(szEnc);
911}
912/**
913 * Internal: decode a string and store it into the specified string.
914 */
915static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
916{
917 int rc = VINF_SUCCESS;
918 char szBuf[4];
919 if (!cb)
920 return VERR_BUFFER_OVERFLOW;
921 AssertPtr(psz);
922 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
923 {
924 char *pszDst = szBuf;
925 RTUNICP Cp = RTStrGetCp(pszEncoded);
926 if (Cp == '\\')
927 {
928 pszEncoded = RTStrNextCp(pszEncoded);
929 RTUNICP CpQ = RTStrGetCp(pszEncoded);
930 if (CpQ == 'n')
931 RTStrPutCp(pszDst, '\n');
932 else if (CpQ == 'r')
933 RTStrPutCp(pszDst, '\r');
934 else if (CpQ == '\0')
935 {
936 rc = VERR_VD_VMDK_INVALID_HEADER;
937 break;
938 }
939 else
940 RTStrPutCp(pszDst, CpQ);
941 }
942 else
943 pszDst = RTStrPutCp(pszDst, Cp);
944 /* Need to leave space for terminating NUL. */
945 if ((size_t)(pszDst - szBuf) + 1 >= cb)
946 {
947 rc = VERR_BUFFER_OVERFLOW;
948 break;
949 }
950 memcpy(psz, szBuf, pszDst - szBuf);
951 psz += pszDst - szBuf;
952 }
953 *psz = '\0';
954 return rc;
955}
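/* Example of the escaping handled by the two helpers above: a newline inside a
 * comment is stored in the descriptor as the two characters backslash + 'n', a
 * carriage return as backslash + 'r', and a literal backslash is doubled.
 * vmdkDecodeString reverses the transformation; all other characters are
 * copied through unchanged (subject to the VMDK_ENCODED_COMMENT_MAX limit on
 * the encoding side). */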
956/**
957 * Internal: free all buffers associated with grain directories.
958 */
959static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
960{
961 if (pExtent->pGD)
962 {
963 RTMemFree(pExtent->pGD);
964 pExtent->pGD = NULL;
965 }
966 if (pExtent->pRGD)
967 {
968 RTMemFree(pExtent->pRGD);
969 pExtent->pRGD = NULL;
970 }
971}
972/**
973 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
974 * images.
975 */
976static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
977{
978 int rc = VINF_SUCCESS;
979 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
980 {
981 /* streamOptimized extents need a compressed grain buffer, which must
982 * be big enough to hold uncompressible data (which needs ~8 bytes
983 * more than the uncompressed data), the marker and padding. */
984 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
985 + 8 + sizeof(VMDKMARKER), 512);
986 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
987 if (RT_LIKELY(pExtent->pvCompGrain))
988 {
989 /* streamOptimized extents need a decompressed grain buffer. */
990 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
991 if (!pExtent->pvGrain)
992 rc = VERR_NO_MEMORY;
993 }
994 else
995 rc = VERR_NO_MEMORY;
996 }
997 if (RT_FAILURE(rc))
998 vmdkFreeStreamBuffers(pExtent);
999 return rc;
1000}
1001/**
1002 * Internal: allocate all buffers associated with grain directories.
1003 */
1004static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1005{
1006 RT_NOREF1(pImage);
1007 int rc = VINF_SUCCESS;
1008 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1009 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1010 if (RT_LIKELY(pExtent->pGD))
1011 {
1012 if (pExtent->uSectorRGD)
1013 {
1014 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1015 if (RT_UNLIKELY(!pExtent->pRGD))
1016 rc = VERR_NO_MEMORY;
1017 }
1018 }
1019 else
1020 rc = VERR_NO_MEMORY;
1021 if (RT_FAILURE(rc))
1022 vmdkFreeGrainDirectory(pExtent);
1023 return rc;
1024}
1025/**
1026 * Converts the grain directory from little endian to host endianness.
1027 *
1028 * @returns nothing.
1029 * @param pGD The grain directory.
1030 * @param cGDEntries Number of entries in the grain directory to convert.
1031 */
1032DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1033{
1034 uint32_t *pGDTmp = pGD;
1035 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1036 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1037}
1038/**
1039 * Read the grain directory and allocated grain tables, verifying them against
1040 * their backup copies if available.
1041 *
1042 * @returns VBox status code.
1043 * @param pImage Image instance data.
1044 * @param pExtent The VMDK extent.
1045 */
1046static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1047{
1048 int rc = VINF_SUCCESS;
1049 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1050 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1051 && pExtent->uSectorGD != VMDK_GD_AT_END
1052 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1053 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1054 if (RT_SUCCESS(rc))
1055 {
1056 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1057 * but in reality they are not compressed. */
1058 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1059 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1060 pExtent->pGD, cbGD);
1061 if (RT_SUCCESS(rc))
1062 {
1063 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1064 if ( pExtent->uSectorRGD
1065 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1066 {
1067 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1068 * but in reality they are not compressed. */
1069 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1070 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1071 pExtent->pRGD, cbGD);
1072 if (RT_SUCCESS(rc))
1073 {
1074 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1075 /* Check grain table and redundant grain table for consistency. */
1076 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1077 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1078 size_t cbGTBuffersMax = _1M;
1079 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1080 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1081 if ( !pTmpGT1
1082 || !pTmpGT2)
1083 rc = VERR_NO_MEMORY;
1084 size_t i = 0;
1085 uint32_t *pGDTmp = pExtent->pGD;
1086 uint32_t *pRGDTmp = pExtent->pRGD;
1087 /* Loop through all entries. */
1088 while (i < pExtent->cGDEntries)
1089 {
1090 uint32_t uGTStart = *pGDTmp;
1091 uint32_t uRGTStart = *pRGDTmp;
1092 size_t cbGTRead = cbGT;
1093 /* If no grain table is allocated skip the entry. */
1094 if (*pGDTmp == 0 && *pRGDTmp == 0)
1095 {
1096 i++;
1097 continue;
1098 }
1099 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1100 {
1101 /* Just one grain directory entry refers to a not yet allocated
1102 * grain table or both grain directory copies refer to the same
1103 * grain table. Not allowed. */
1104 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1105 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1106 break;
1107 }
1108 i++;
1109 pGDTmp++;
1110 pRGDTmp++;
1111 /*
1112 * Read a few tables at once if adjacent to decrease the number
1113 * of I/O requests. Read at maximum 1MB at once.
1114 */
1115 while ( i < pExtent->cGDEntries
1116 && cbGTRead < cbGTBuffersMax)
1117 {
1118 /* If no grain table is allocated skip the entry. */
1119 if (*pGDTmp == 0 && *pRGDTmp == 0)
1120 {
1121 i++;
1122 continue;
1123 }
1124 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1125 {
1126 /* Just one grain directory entry refers to a not yet allocated
1127 * grain table or both grain directory copies refer to the same
1128 * grain table. Not allowed. */
1129 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1130 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1131 break;
1132 }
1133 /* Check that the start offsets are adjacent.*/
1134 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1135 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1136 break;
1137 i++;
1138 pGDTmp++;
1139 pRGDTmp++;
1140 cbGTRead += cbGT;
1141 }
1142 /* Increase buffers if required. */
1143 if ( RT_SUCCESS(rc)
1144 && cbGTBuffers < cbGTRead)
1145 {
1146 uint32_t *pTmp;
1147 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1148 if (pTmp)
1149 {
1150 pTmpGT1 = pTmp;
1151 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1152 if (pTmp)
1153 pTmpGT2 = pTmp;
1154 else
1155 rc = VERR_NO_MEMORY;
1156 }
1157 else
1158 rc = VERR_NO_MEMORY;
1159 if (rc == VERR_NO_MEMORY)
1160 {
1161 /* Reset to the old values. */
1162 rc = VINF_SUCCESS;
1163 i -= cbGTRead / cbGT;
1164 cbGTRead = cbGT;
1165 /* Don't try to increase the buffer again in the next run. */
1166 cbGTBuffersMax = cbGTBuffers;
1167 }
1168 }
1169 if (RT_SUCCESS(rc))
1170 {
1171 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1172 * but in reality they are not compressed. */
1173 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1174 VMDK_SECTOR2BYTE(uGTStart),
1175 pTmpGT1, cbGTRead);
1176 if (RT_FAILURE(rc))
1177 {
1178 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1179 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1180 break;
1181 }
1182 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1183 * but in reality they are not compressed. */
1184 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1185 VMDK_SECTOR2BYTE(uRGTStart),
1186 pTmpGT2, cbGTRead);
1187 if (RT_FAILURE(rc))
1188 {
1189 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1190 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1191 break;
1192 }
1193 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1194 {
1195 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1196 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1197 break;
1198 }
1199 }
1200 } /* while (i < pExtent->cGDEntries) */
1201 /** @todo figure out what to do for unclean VMDKs. */
1202 if (pTmpGT1)
1203 RTMemFree(pTmpGT1);
1204 if (pTmpGT2)
1205 RTMemFree(pTmpGT2);
1206 }
1207 else
1208 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1209 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1210 }
1211 }
1212 else
1213 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1214 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1215 }
1216 if (RT_FAILURE(rc))
1217 vmdkFreeGrainDirectory(pExtent);
1218 return rc;
1219}
1220/**
1221 * Creates a new grain directory for the given extent at the given start sector.
1222 *
1223 * @returns VBox status code.
1224 * @param pImage Image instance data.
1225 * @param pExtent The VMDK extent.
1226 * @param uStartSector Where the grain directory should be stored in the image.
1227 * @param fPreAlloc Flag whether to pre-allocate the grain tables at this point.
1228 */
1229static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1230 uint64_t uStartSector, bool fPreAlloc)
1231{
1232 int rc = VINF_SUCCESS;
1233 unsigned i;
1234 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1235 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1236 size_t cbGTRounded;
1237 uint64_t cbOverhead;
1238 if (fPreAlloc)
1239 {
1240 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1241 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1242 }
1243 else
1244 {
1245 /* Use a dummy start sector for layout computation. */
1246 if (uStartSector == VMDK_GD_AT_END)
1247 uStartSector = 1;
1248 cbGTRounded = 0;
1249 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1250 }
1251 /* For streamOptimized extents there is only one grain directory,
1252 * and for all others take redundant grain directory into account. */
1253 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1254 {
1255 cbOverhead = RT_ALIGN_64(cbOverhead,
1256 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1257 }
1258 else
1259 {
1260 cbOverhead += cbGDRounded + cbGTRounded;
1261 cbOverhead = RT_ALIGN_64(cbOverhead,
1262 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1263 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1264 }
1265 if (RT_SUCCESS(rc))
1266 {
1267 pExtent->uAppendPosition = cbOverhead;
1268 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1269 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1270 {
1271 pExtent->uSectorRGD = 0;
1272 pExtent->uSectorGD = uStartSector;
1273 }
1274 else
1275 {
1276 pExtent->uSectorRGD = uStartSector;
1277 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1278 }
1279 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1280 if (RT_SUCCESS(rc))
1281 {
1282 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1283 if ( RT_SUCCESS(rc)
1284 && fPreAlloc)
1285 {
1286 uint32_t uGTSectorLE;
1287 uint64_t uOffsetSectors;
1288 if (pExtent->pRGD)
1289 {
1290 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1291 for (i = 0; i < pExtent->cGDEntries; i++)
1292 {
1293 pExtent->pRGD[i] = uOffsetSectors;
1294 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1295 /* Write the redundant grain directory entry to disk. */
1296 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1297 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1298 &uGTSectorLE, sizeof(uGTSectorLE));
1299 if (RT_FAILURE(rc))
1300 {
1301 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1302 break;
1303 }
1304 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1305 }
1306 }
1307 if (RT_SUCCESS(rc))
1308 {
1309 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1310 for (i = 0; i < pExtent->cGDEntries; i++)
1311 {
1312 pExtent->pGD[i] = uOffsetSectors;
1313 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1314 /* Write the grain directory entry to disk. */
1315 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1316 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1317 &uGTSectorLE, sizeof(uGTSectorLE));
1318 if (RT_FAILURE(rc))
1319 {
1320 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1321 break;
1322 }
1323 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1324 }
1325 }
1326 }
1327 }
1328 }
1329 if (RT_FAILURE(rc))
1330 vmdkFreeGrainDirectory(pExtent);
1331 return rc;
1332}
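/* Worked example for the overhead computation above, assuming the usual
 * defaults (64K grains, i.e. 128 sectors per grain, and 512 GT entries per
 * grain table) and a 2G extent: the extent holds 32768 grains, so it needs
 * 32768 / 512 = 64 grain tables of 2K each (128K total) plus a grain directory
 * of 64 * 4 bytes, rounded up to one 512-byte sector. For non-streamOptimized
 * extents this is accounted twice (redundant GD/GT), and the result is aligned
 * up to a grain boundary before it becomes uAppendPosition and
 * cOverheadSectors. */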
1333/**
1334 * Unquotes the given string returning the result in a separate buffer.
1335 *
1336 * @returns VBox status code.
1337 * @param pImage The VMDK image state.
1338 * @param pszStr The string to unquote.
1339 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1340 * free.
1341 * @param ppszNext Where to store the pointer to any character following
1342 * the quoted value, optional.
1343 */
1344static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1345 char **ppszUnquoted, char **ppszNext)
1346{
1347 const char *pszStart = pszStr;
1348 char *pszQ;
1349 char *pszUnquoted;
1350 /* Skip over whitespace. */
1351 while (*pszStr == ' ' || *pszStr == '\t')
1352 pszStr++;
1353 if (*pszStr != '"')
1354 {
1355 pszQ = (char *)pszStr;
1356 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1357 pszQ++;
1358 }
1359 else
1360 {
1361 pszStr++;
1362 pszQ = (char *)strchr(pszStr, '"');
1363 if (pszQ == NULL)
1364 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1365 pImage->pszFilename, pszStart);
1366 }
1367 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1368 if (!pszUnquoted)
1369 return VERR_NO_MEMORY;
1370 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1371 pszUnquoted[pszQ - pszStr] = '\0';
1372 *ppszUnquoted = pszUnquoted;
1373 if (ppszNext)
1374 *ppszNext = pszQ + 1;
1375 return VINF_SUCCESS;
1376}
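/* Illustration of the unquoting above for a typical descriptor value: given
 * the text createType="monolithicSparse" with pszStr pointing just behind the
 * '=', the function returns "monolithicSparse" in *ppszUnquoted and, if
 * requested, *ppszNext points behind the closing quote. Values without quotes
 * are simply cut off at the first blank or tab. */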
1377static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1378 const char *pszLine)
1379{
1380 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1381 ssize_t cbDiff = strlen(pszLine) + 1;
1382 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1383 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1384 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1385 memcpy(pEnd, pszLine, cbDiff);
1386 pDescriptor->cLines++;
1387 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1388 pDescriptor->fDirty = true;
1389 return VINF_SUCCESS;
1390}
1391static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1392 const char *pszKey, const char **ppszValue)
1393{
1394 size_t cbKey = strlen(pszKey);
1395 const char *pszValue;
1396 while (uStart != 0)
1397 {
1398 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1399 {
1400 /* Key matches, check for a '=' (preceded by whitespace). */
1401 pszValue = pDescriptor->aLines[uStart] + cbKey;
1402 while (*pszValue == ' ' || *pszValue == '\t')
1403 pszValue++;
1404 if (*pszValue == '=')
1405 {
1406 *ppszValue = pszValue + 1;
1407 break;
1408 }
1409 }
1410 uStart = pDescriptor->aNextLines[uStart];
1411 }
1412 return !!uStart;
1413}
1414static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1415 unsigned uStart,
1416 const char *pszKey, const char *pszValue)
1417{
1418 char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
1419 size_t cbKey = strlen(pszKey);
1420 unsigned uLast = 0;
1421 while (uStart != 0)
1422 {
1423 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1424 {
1425 /* Key matches, check for a '=' (preceded by whitespace). */
1426 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1427 while (*pszTmp == ' ' || *pszTmp == '\t')
1428 pszTmp++;
1429 if (*pszTmp == '=')
1430 {
1431 pszTmp++;
1432 /** @todo r=bird: Doesn't skipping trailing blanks here just cause unnecessary
1433 * bloat and potentially out of space error? */
1434 while (*pszTmp == ' ' || *pszTmp == '\t')
1435 pszTmp++;
1436 break;
1437 }
1438 }
1439 if (!pDescriptor->aNextLines[uStart])
1440 uLast = uStart;
1441 uStart = pDescriptor->aNextLines[uStart];
1442 }
1443 if (uStart)
1444 {
1445 if (pszValue)
1446 {
1447 /* Key already exists, replace existing value. */
1448 size_t cbOldVal = strlen(pszTmp);
1449 size_t cbNewVal = strlen(pszValue);
1450 ssize_t cbDiff = cbNewVal - cbOldVal;
1451 /* Check for buffer overflow. */
1452 if ( pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
1453 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1454 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1455 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1456 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1457 memcpy(pszTmp, pszValue, cbNewVal + 1);
1458 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1459 pDescriptor->aLines[i] += cbDiff;
1460 }
1461 else
1462 {
1463 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1464 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1465 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1466 {
1467 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1468 if (pDescriptor->aNextLines[i])
1469 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1470 else
1471 pDescriptor->aNextLines[i-1] = 0;
1472 }
1473 pDescriptor->cLines--;
1474 /* Adjust starting line numbers of following descriptor sections. */
1475 if (uStart < pDescriptor->uFirstExtent)
1476 pDescriptor->uFirstExtent--;
1477 if (uStart < pDescriptor->uFirstDDB)
1478 pDescriptor->uFirstDDB--;
1479 }
1480 }
1481 else
1482 {
1483 /* Key doesn't exist, append after the last entry in this category. */
1484 if (!pszValue)
1485 {
1486 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1487 return VINF_SUCCESS;
1488 }
1489 cbKey = strlen(pszKey);
1490 size_t cbValue = strlen(pszValue);
1491 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1492 /* Check for buffer overflow. */
1493 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1494 || ( pDescriptor->aLines[pDescriptor->cLines]
1495 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1496 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1497 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1498 {
1499 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1500 if (pDescriptor->aNextLines[i - 1])
1501 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1502 else
1503 pDescriptor->aNextLines[i] = 0;
1504 }
1505 uStart = uLast + 1;
1506 pDescriptor->aNextLines[uLast] = uStart;
1507 pDescriptor->aNextLines[uStart] = 0;
1508 pDescriptor->cLines++;
1509 pszTmp = pDescriptor->aLines[uStart];
1510 memmove(pszTmp + cbDiff, pszTmp,
1511 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1512 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1513 pDescriptor->aLines[uStart][cbKey] = '=';
1514 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1515 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1516 pDescriptor->aLines[i] += cbDiff;
1517 /* Adjust starting line numbers of following descriptor sections. */
1518 if (uStart <= pDescriptor->uFirstExtent)
1519 pDescriptor->uFirstExtent++;
1520 if (uStart <= pDescriptor->uFirstDDB)
1521 pDescriptor->uFirstDDB++;
1522 }
1523 pDescriptor->fDirty = true;
1524 return VINF_SUCCESS;
1525}
1526static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1527 uint32_t *puValue)
1528{
1529 const char *pszValue;
1530 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1531 &pszValue))
1532 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1533 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1534}
1535/**
1536 * Returns the value of the given key as a string allocating the necessary memory.
1537 *
1538 * @returns VBox status code.
1539 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1540 * @param pImage The VMDK image state.
1541 * @param pDescriptor The descriptor to fetch the value from.
1542 * @param pszKey The key to get the value from.
1543 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1544 * free.
1545 */
1546static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1547 const char *pszKey, char **ppszValue)
1548{
1549 const char *pszValue;
1550 char *pszValueUnquoted;
1551 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1552 &pszValue))
1553 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1554 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1555 if (RT_FAILURE(rc))
1556 return rc;
1557 *ppszValue = pszValueUnquoted;
1558 return rc;
1559}
1560static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1561 const char *pszKey, const char *pszValue)
1562{
1563 char *pszValueQuoted;
1564 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1565 if (!pszValueQuoted)
1566 return VERR_NO_STR_MEMORY;
1567 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1568 pszValueQuoted);
1569 RTStrFree(pszValueQuoted);
1570 return rc;
1571}
1572static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1573 PVMDKDESCRIPTOR pDescriptor)
1574{
1575 RT_NOREF1(pImage);
1576 unsigned uEntry = pDescriptor->uFirstExtent;
1577 ssize_t cbDiff;
1578 if (!uEntry)
1579 return;
1580 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1581 /* Move everything including \0 in the entry marking the end of buffer. */
1582 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1583 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1584 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1585 {
1586 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1587 if (pDescriptor->aNextLines[i])
1588 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1589 else
1590 pDescriptor->aNextLines[i - 1] = 0;
1591 }
1592 pDescriptor->cLines--;
1593 if (pDescriptor->uFirstDDB)
1594 pDescriptor->uFirstDDB--;
1595 return;
1596}
1597static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1598 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1599 VMDKETYPE enmType, const char *pszBasename,
1600 uint64_t uSectorOffset)
1601{
1602 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1603 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1604 char *pszTmp;
1605 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1606 char szExt[1024];
1607 ssize_t cbDiff;
1608 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1609 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1610 /* Find last entry in extent description. */
1611 while (uStart)
1612 {
1613 if (!pDescriptor->aNextLines[uStart])
1614 uLast = uStart;
1615 uStart = pDescriptor->aNextLines[uStart];
1616 }
1617 if (enmType == VMDKETYPE_ZERO)
1618 {
1619 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1620 cNominalSectors, apszType[enmType]);
1621 }
1622 else if (enmType == VMDKETYPE_FLAT)
1623 {
1624 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1625 apszAccess[enmAccess], cNominalSectors,
1626 apszType[enmType], pszBasename, uSectorOffset);
1627 }
1628 else
1629 {
1630 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1631 apszAccess[enmAccess], cNominalSectors,
1632 apszType[enmType], pszBasename);
1633 }
1634 cbDiff = strlen(szExt) + 1;
1635 /* Check for buffer overflow. */
1636 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1637 || ( pDescriptor->aLines[pDescriptor->cLines]
1638 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1639 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1640 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1641 {
1642 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1643 if (pDescriptor->aNextLines[i - 1])
1644 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1645 else
1646 pDescriptor->aNextLines[i] = 0;
1647 }
1648 uStart = uLast + 1;
1649 pDescriptor->aNextLines[uLast] = uStart;
1650 pDescriptor->aNextLines[uStart] = 0;
1651 pDescriptor->cLines++;
1652 pszTmp = pDescriptor->aLines[uStart];
1653 memmove(pszTmp + cbDiff, pszTmp,
1654 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1655 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1656 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1657 pDescriptor->aLines[i] += cbDiff;
1658 /* Adjust starting line numbers of following descriptor sections. */
1659 if (uStart <= pDescriptor->uFirstDDB)
1660 pDescriptor->uFirstDDB++;
1661 pDescriptor->fDirty = true;
1662 return VINF_SUCCESS;
1663}
1664/**
1665 * Returns the value of the given key from the DDB as a string allocating
1666 * the necessary memory.
1667 *
1668 * @returns VBox status code.
1669 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1670 * @param pImage The VMDK image state.
1671 * @param pDescriptor The descriptor to fetch the value from.
1672 * @param pszKey The key to get the value from.
1673 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1674 * free.
1675 */
1676static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1677 const char *pszKey, char **ppszValue)
1678{
1679 const char *pszValue;
1680 char *pszValueUnquoted;
1681 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1682 &pszValue))
1683 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1684 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1685 if (RT_FAILURE(rc))
1686 return rc;
1687 *ppszValue = pszValueUnquoted;
1688 return rc;
1689}
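/**
 * Returns the value of the given key from the DDB section as a 32-bit
 * unsigned integer, unquoting the stored value before conversion.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the key could not be found.
 */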
1690static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1691 const char *pszKey, uint32_t *puValue)
1692{
1693 const char *pszValue;
1694 char *pszValueUnquoted;
1695 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1696 &pszValue))
1697 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1698 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1699 if (RT_FAILURE(rc))
1700 return rc;
1701 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1702 RTMemTmpFree(pszValueUnquoted);
1703 return rc;
1704}
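/**
 * Returns the value of the given key from the DDB section as a UUID.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the key could not be found.
 */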
1705static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1706 const char *pszKey, PRTUUID pUuid)
1707{
1708 const char *pszValue;
1709 char *pszValueUnquoted;
1710 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1711 &pszValue))
1712 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1713 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1714 if (RT_FAILURE(rc))
1715 return rc;
1716 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1717 RTMemTmpFree(pszValueUnquoted);
1718 return rc;
1719}
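/**
 * Sets the given key in the DDB section to the given string value, adding
 * quotes around it; a NULL value is forwarded unquoted to vmdkDescSetStr.
 *
 * @returns VBox status code.
 */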
1720static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1721 const char *pszKey, const char *pszVal)
1722{
1723 int rc;
1724 char *pszValQuoted;
1725 if (pszVal)
1726 {
1727 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1728 if (!pszValQuoted)
1729 return VERR_NO_STR_MEMORY;
1730 }
1731 else
1732 pszValQuoted = NULL;
1733 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1734 pszValQuoted);
1735 if (pszValQuoted)
1736 RTStrFree(pszValQuoted);
1737 return rc;
1738}
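/**
 * Sets the given key in the DDB section to the string representation of the
 * given UUID.
 *
 * @returns VBox status code.
 */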
1739static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1740 const char *pszKey, PCRTUUID pUuid)
1741{
1742 char *pszUuid;
1743 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1744 if (!pszUuid)
1745 return VERR_NO_STR_MEMORY;
1746 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1747 pszUuid);
1748 RTStrFree(pszUuid);
1749 return rc;
1750}
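/**
 * Sets the given key in the DDB section to the quoted decimal representation
 * of the given 32-bit value.
 *
 * @returns VBox status code.
 */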
1751static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1752 const char *pszKey, uint32_t uValue)
1753{
1754 char *pszValue;
1755 RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1756 if (!pszValue)
1757 return VERR_NO_STR_MEMORY;
1758 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1759 pszValue);
1760 RTStrFree(pszValue);
1761 return rc;
1762}
1763/**
1764 * Splits the descriptor data into individual lines checking for correct line
1765 * endings and descriptor size.
1766 *
1767 * @returns VBox status code.
1768 * @param pImage The image instance.
1769 * @param pDesc The descriptor.
1770 * @param pszTmp The raw descriptor data from the image.
1771 */
1772static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1773{
1774 unsigned cLine = 0;
1775 int rc = VINF_SUCCESS;
1776 while ( RT_SUCCESS(rc)
1777 && *pszTmp != '\0')
1778 {
1779 pDesc->aLines[cLine++] = pszTmp;
1780 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1781 {
1782 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1783 rc = VERR_VD_VMDK_INVALID_HEADER;
1784 break;
1785 }
1786 while (*pszTmp != '\0' && *pszTmp != '\n')
1787 {
1788 if (*pszTmp == '\r')
1789 {
1790 if (*(pszTmp + 1) != '\n')
1791 {
1792 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1793 break;
1794 }
1795 else
1796 {
1797 /* Get rid of CR character. */
1798 *pszTmp = '\0';
1799 }
1800 }
1801 pszTmp++;
1802 }
1803 if (RT_FAILURE(rc))
1804 break;
1805 /* Get rid of LF character. */
1806 if (*pszTmp == '\n')
1807 {
1808 *pszTmp = '\0';
1809 pszTmp++;
1810 }
1811 }
1812 if (RT_SUCCESS(rc))
1813 {
1814 pDesc->cLines = cLine;
1815 /* Pointer right after the end of the used part of the buffer. */
1816 pDesc->aLines[cLine] = pszTmp;
1817 }
1818 return rc;
1819}
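/**
 * Splits the raw descriptor data into lines (via vmdkDescSplitLines) and
 * locates the three sections (base descriptor, extent description, disk
 * database), linking the non-empty lines of each section via aNextLines.
 *
 * The expected layout is roughly the following (illustrative sketch only,
 * not taken from a real image):
 *     # Disk DescriptorFile
 *     version=1
 *     CID=12345678
 *     parentCID=ffffffff
 *     createType="monolithicSparse"
 *
 *     # Extent description
 *     RW 2097152 SPARSE "example.vmdk"
 *
 *     # The disk Data Base
 *     ddb.virtualHWVersion = "4"
 *
 * @returns VBox status code.
 */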
1820static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1821 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1822{
1823 pDescriptor->cbDescAlloc = cbDescData;
1824 int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
1825 if (RT_SUCCESS(rc))
1826 {
1827 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1828 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
1829 && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
1830 && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
1831 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1832 N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1833 else
1834 {
1835 unsigned uLastNonEmptyLine = 0;
1836 /* Initialize those, because we need to be able to reopen an image. */
1837 pDescriptor->uFirstDesc = 0;
1838 pDescriptor->uFirstExtent = 0;
1839 pDescriptor->uFirstDDB = 0;
1840 for (unsigned i = 0; i < pDescriptor->cLines; i++)
1841 {
1842 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1843 {
1844 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1845 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1846 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1847 {
1848 /* An extent descriptor. */
1849 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1850 {
1851 /* Incorrect ordering of entries. */
1852 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1853 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1854 break;
1855 }
1856 if (!pDescriptor->uFirstExtent)
1857 {
1858 pDescriptor->uFirstExtent = i;
1859 uLastNonEmptyLine = 0;
1860 }
1861 }
1862 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1863 {
1864 /* A disk database entry. */
1865 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1866 {
1867 /* Incorrect ordering of entries. */
1868 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1869 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1870 break;
1871 }
1872 if (!pDescriptor->uFirstDDB)
1873 {
1874 pDescriptor->uFirstDDB = i;
1875 uLastNonEmptyLine = 0;
1876 }
1877 }
1878 else
1879 {
1880 /* A normal entry. */
1881 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1882 {
1883 /* Incorrect ordering of entries. */
1884 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1885 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1886 break;
1887 }
1888 if (!pDescriptor->uFirstDesc)
1889 {
1890 pDescriptor->uFirstDesc = i;
1891 uLastNonEmptyLine = 0;
1892 }
1893 }
1894 if (uLastNonEmptyLine)
1895 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1896 uLastNonEmptyLine = i;
1897 }
1898 }
1899 }
1900 }
1901 return rc;
1902}
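/**
 * Stores the physical CHS geometry (cylinders, heads, sectors) in the DDB
 * section of the descriptor.
 *
 * @returns VBox status code.
 */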
1903static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1904 PCVDGEOMETRY pPCHSGeometry)
1905{
1906 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1907 VMDK_DDB_GEO_PCHS_CYLINDERS,
1908 pPCHSGeometry->cCylinders);
1909 if (RT_FAILURE(rc))
1910 return rc;
1911 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1912 VMDK_DDB_GEO_PCHS_HEADS,
1913 pPCHSGeometry->cHeads);
1914 if (RT_FAILURE(rc))
1915 return rc;
1916 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1917 VMDK_DDB_GEO_PCHS_SECTORS,
1918 pPCHSGeometry->cSectors);
1919 return rc;
1920}
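/**
 * Stores the logical CHS geometry (cylinders, heads, sectors) in the DDB
 * section of the descriptor.
 *
 * @returns VBox status code.
 */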
1921static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1922 PCVDGEOMETRY pLCHSGeometry)
1923{
1924 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1925 VMDK_DDB_GEO_LCHS_CYLINDERS,
1926 pLCHSGeometry->cCylinders);
1927 if (RT_FAILURE(rc))
1928 return rc;
1929 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1930 VMDK_DDB_GEO_LCHS_HEADS,
1931 pLCHSGeometry->cHeads);
1932 if (RT_FAILURE(rc))
1933 return rc;
1934 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1935 VMDK_DDB_GEO_LCHS_SECTORS,
1936 pLCHSGeometry->cSectors);
1937 return rc;
1938}
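/**
 * Initializes a freshly created descriptor with the standard skeleton:
 * header comment, version, random CID, "ffffffff" parent CID, an extent
 * description section containing only a dummy ZERO extent, and a disk
 * database section with the hardware version and adapter type.
 *
 * @returns VBox status code.
 */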
1939static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1940 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1941{
1942 pDescriptor->uFirstDesc = 0;
1943 pDescriptor->uFirstExtent = 0;
1944 pDescriptor->uFirstDDB = 0;
1945 pDescriptor->cLines = 0;
1946 pDescriptor->cbDescAlloc = cbDescData;
1947 pDescriptor->fDirty = false;
1948 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1949 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1950 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1951 if (RT_SUCCESS(rc))
1952 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1953 if (RT_SUCCESS(rc))
1954 {
1955 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1956 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1957 }
1958 if (RT_SUCCESS(rc))
1959 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1960 if (RT_SUCCESS(rc))
1961 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1962 if (RT_SUCCESS(rc))
1963 {
1964 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
1965 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1966 }
1967 if (RT_SUCCESS(rc))
1968 {
1969 /* The trailing space is created by VMware, too. */
1970 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
1971 }
1972 if (RT_SUCCESS(rc))
1973 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
1974 if (RT_SUCCESS(rc))
1975 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1976 if (RT_SUCCESS(rc))
1977 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
1978 if (RT_SUCCESS(rc))
1979 {
1980 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
1981 /* Now that the framework is in place, use the normal functions to insert
1982 * the remaining keys. */
1983 char szBuf[9];
1984 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
1985 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
1986 "CID", szBuf);
1987 }
1988 if (RT_SUCCESS(rc))
1989 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
1990 "parentCID", "ffffffff");
1991 if (RT_SUCCESS(rc))
1992 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
1993 return rc;
1994}
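/**
 * Parses a complete descriptor: verifies the format version (must be 1),
 * derives the image flags from createType, reads the extent lines, the
 * PCHS/LCHS geometries and the various UUIDs, creating missing UUIDs for
 * images opened in read/write mode.
 *
 * @returns VBox status code.
 */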
1995static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
1996{
1997 int rc;
1998 unsigned cExtents;
1999 unsigned uLine;
2000 unsigned i;
2001 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2002 &pImage->Descriptor);
2003 if (RT_FAILURE(rc))
2004 return rc;
2005 /* Check version, must be 1. */
2006 uint32_t uVersion;
2007 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2008 if (RT_FAILURE(rc))
2009 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2010 if (uVersion != 1)
2011 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2012 /* Get image creation type and determine image flags. */
2013 char *pszCreateType = NULL; /* initialized to make gcc shut up */
2014 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2015 &pszCreateType);
2016 if (RT_FAILURE(rc))
2017 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2018 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2019 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2020 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2021 else if ( !strcmp(pszCreateType, "partitionedDevice")
2022 || !strcmp(pszCreateType, "fullDevice"))
2023 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2024 else if (!strcmp(pszCreateType, "streamOptimized"))
2025 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2026 else if (!strcmp(pszCreateType, "vmfs"))
2027 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2028 RTMemTmpFree(pszCreateType);
2029 /* Count the number of extent config entries. */
2030 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2031 uLine != 0;
2032 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2033 /* nothing */;
2034 if (!pImage->pDescData && cExtents != 1)
2035 {
2036 /* Monolithic image, must have only one extent (already opened). */
2037 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2038 }
2039 if (pImage->pDescData)
2040 {
2041 /* Non-monolithic image, extents need to be allocated. */
2042 rc = vmdkCreateExtents(pImage, cExtents);
2043 if (RT_FAILURE(rc))
2044 return rc;
2045 }
2046 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2047 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2048 {
2049 char *pszLine = pImage->Descriptor.aLines[uLine];
2050 /* Access type of the extent. */
2051 if (!strncmp(pszLine, "RW", 2))
2052 {
2053 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2054 pszLine += 2;
2055 }
2056 else if (!strncmp(pszLine, "RDONLY", 6))
2057 {
2058 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2059 pszLine += 6;
2060 }
2061 else if (!strncmp(pszLine, "NOACCESS", 8))
2062 {
2063 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2064 pszLine += 8;
2065 }
2066 else
2067 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2068 if (*pszLine++ != ' ')
2069 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2070 /* Nominal size of the extent. */
2071 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2072 &pImage->pExtents[i].cNominalSectors);
2073 if (RT_FAILURE(rc))
2074 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2075 if (*pszLine++ != ' ')
2076 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2077 /* Type of the extent. */
2078 if (!strncmp(pszLine, "SPARSE", 6))
2079 {
2080 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2081 pszLine += 6;
2082 }
2083 else if (!strncmp(pszLine, "FLAT", 4))
2084 {
2085 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2086 pszLine += 4;
2087 }
2088 else if (!strncmp(pszLine, "ZERO", 4))
2089 {
2090 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2091 pszLine += 4;
2092 }
2093 else if (!strncmp(pszLine, "VMFS", 4))
2094 {
2095 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2096 pszLine += 4;
2097 }
2098 else
2099 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2100 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2101 {
2102 /* This one has no basename or offset. */
2103 if (*pszLine == ' ')
2104 pszLine++;
2105 if (*pszLine != '\0')
2106 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2107 pImage->pExtents[i].pszBasename = NULL;
2108 }
2109 else
2110 {
2111 /* All other extent types have basename and optional offset. */
2112 if (*pszLine++ != ' ')
2113 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2114 /* Basename of the image. Surrounded by quotes. */
2115 char *pszBasename;
2116 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2117 if (RT_FAILURE(rc))
2118 return rc;
2119 pImage->pExtents[i].pszBasename = pszBasename;
2120 if (*pszLine == ' ')
2121 {
2122 pszLine++;
2123 if (*pszLine != '\0')
2124 {
2125 /* Optional offset in extent specified. */
2126 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2127 &pImage->pExtents[i].uSectorOffset);
2128 if (RT_FAILURE(rc))
2129 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2130 }
2131 }
2132 if (*pszLine != '\0')
2133 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2134 }
2135 }
2136 /* Determine PCHS geometry (autogenerate if necessary). */
2137 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2138 VMDK_DDB_GEO_PCHS_CYLINDERS,
2139 &pImage->PCHSGeometry.cCylinders);
2140 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2141 pImage->PCHSGeometry.cCylinders = 0;
2142 else if (RT_FAILURE(rc))
2143 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2144 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2145 VMDK_DDB_GEO_PCHS_HEADS,
2146 &pImage->PCHSGeometry.cHeads);
2147 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2148 pImage->PCHSGeometry.cHeads = 0;
2149 else if (RT_FAILURE(rc))
2150 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2151 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2152 VMDK_DDB_GEO_PCHS_SECTORS,
2153 &pImage->PCHSGeometry.cSectors);
2154 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2155 pImage->PCHSGeometry.cSectors = 0;
2156 else if (RT_FAILURE(rc))
2157 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2158 if ( pImage->PCHSGeometry.cCylinders == 0
2159 || pImage->PCHSGeometry.cHeads == 0
2160 || pImage->PCHSGeometry.cHeads > 16
2161 || pImage->PCHSGeometry.cSectors == 0
2162 || pImage->PCHSGeometry.cSectors > 63)
2163 {
2164 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2165 * as the total image size isn't known yet). */
2166 pImage->PCHSGeometry.cCylinders = 0;
2167 pImage->PCHSGeometry.cHeads = 16;
2168 pImage->PCHSGeometry.cSectors = 63;
2169 }
2170 /* Determine LCHS geometry (set to 0 if not specified). */
2171 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2172 VMDK_DDB_GEO_LCHS_CYLINDERS,
2173 &pImage->LCHSGeometry.cCylinders);
2174 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2175 pImage->LCHSGeometry.cCylinders = 0;
2176 else if (RT_FAILURE(rc))
2177 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2178 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2179 VMDK_DDB_GEO_LCHS_HEADS,
2180 &pImage->LCHSGeometry.cHeads);
2181 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2182 pImage->LCHSGeometry.cHeads = 0;
2183 else if (RT_FAILURE(rc))
2184 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2185 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2186 VMDK_DDB_GEO_LCHS_SECTORS,
2187 &pImage->LCHSGeometry.cSectors);
2188 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2189 pImage->LCHSGeometry.cSectors = 0;
2190 else if (RT_FAILURE(rc))
2191 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2192 if ( pImage->LCHSGeometry.cCylinders == 0
2193 || pImage->LCHSGeometry.cHeads == 0
2194 || pImage->LCHSGeometry.cSectors == 0)
2195 {
2196 pImage->LCHSGeometry.cCylinders = 0;
2197 pImage->LCHSGeometry.cHeads = 0;
2198 pImage->LCHSGeometry.cSectors = 0;
2199 }
2200 /* Get image UUID. */
2201 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2202 &pImage->ImageUuid);
2203 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2204 {
2205 /* Image without UUID. Probably created by VMware and not yet used
2206 * by VirtualBox. Can only be added for images opened in read/write
2207 * mode, so don't bother producing a sensible UUID otherwise. */
2208 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2209 RTUuidClear(&pImage->ImageUuid);
2210 else
2211 {
2212 rc = RTUuidCreate(&pImage->ImageUuid);
2213 if (RT_FAILURE(rc))
2214 return rc;
2215 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2216 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2217 if (RT_FAILURE(rc))
2218 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2219 }
2220 }
2221 else if (RT_FAILURE(rc))
2222 return rc;
2223 /* Get image modification UUID. */
2224 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2225 VMDK_DDB_MODIFICATION_UUID,
2226 &pImage->ModificationUuid);
2227 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2228 {
2229 /* Image without UUID. Probably created by VMware and not yet used
2230 * by VirtualBox. Can only be added for images opened in read/write
2231 * mode, so don't bother producing a sensible UUID otherwise. */
2232 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2233 RTUuidClear(&pImage->ModificationUuid);
2234 else
2235 {
2236 rc = RTUuidCreate(&pImage->ModificationUuid);
2237 if (RT_FAILURE(rc))
2238 return rc;
2239 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2240 VMDK_DDB_MODIFICATION_UUID,
2241 &pImage->ModificationUuid);
2242 if (RT_FAILURE(rc))
2243 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2244 }
2245 }
2246 else if (RT_FAILURE(rc))
2247 return rc;
2248 /* Get UUID of parent image. */
2249 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2250 &pImage->ParentUuid);
2251 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2252 {
2253 /* Image without UUID. Probably created by VMware and not yet used
2254 * by VirtualBox. Can only be added for images opened in read/write
2255 * mode, so don't bother producing a sensible UUID otherwise. */
2256 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2257 RTUuidClear(&pImage->ParentUuid);
2258 else
2259 {
2260 rc = RTUuidClear(&pImage->ParentUuid);
2261 if (RT_FAILURE(rc))
2262 return rc;
2263 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2264 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2265 if (RT_FAILURE(rc))
2266 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2267 }
2268 }
2269 else if (RT_FAILURE(rc))
2270 return rc;
2271 /* Get parent image modification UUID. */
2272 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2273 VMDK_DDB_PARENT_MODIFICATION_UUID,
2274 &pImage->ParentModificationUuid);
2275 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2276 {
2277 /* Image without UUID. Probably created by VMware and not yet used
2278 * by VirtualBox. Can only be added for images opened in read/write
2279 * mode, so don't bother producing a sensible UUID otherwise. */
2280 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2281 RTUuidClear(&pImage->ParentModificationUuid);
2282 else
2283 {
2284 RTUuidClear(&pImage->ParentModificationUuid);
2285 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2286 VMDK_DDB_PARENT_MODIFICATION_UUID,
2287 &pImage->ParentModificationUuid);
2288 if (RT_FAILURE(rc))
2289 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2290 }
2291 }
2292 else if (RT_FAILURE(rc))
2293 return rc;
2294 return VINF_SUCCESS;
2295}
2296/**
2297 * Internal: Prepares the descriptor data for writing to the image.
2298 */
2299static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2300 void **ppvData, size_t *pcbData)
2301{
2302 int rc = VINF_SUCCESS;
2303 /*
2304 * Allocate temporary descriptor buffer.
2305 * If there is no limit, allocate a default size
2306 * and increase it if required.
2307 */
2308 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2309 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2310 size_t offDescriptor = 0;
2311 if (!pszDescriptor)
2312 return VERR_NO_MEMORY;
2313 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2314 {
2315 const char *psz = pImage->Descriptor.aLines[i];
2316 size_t cb = strlen(psz);
2317 /*
2318 * Increase the descriptor if there is no limit and
2319 * there is not enough room left for this line.
2320 */
2321 if (offDescriptor + cb + 1 > cbDescriptor)
2322 {
2323 if (cbLimit)
2324 {
2325 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2326 break;
2327 }
2328 else
2329 {
2330 char *pszDescriptorNew = NULL;
2331 LogFlow(("Increasing descriptor cache\n"));
2332 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2333 if (!pszDescriptorNew)
2334 {
2335 rc = VERR_NO_MEMORY;
2336 break;
2337 }
2338 pszDescriptor = pszDescriptorNew;
2339 cbDescriptor += cb + 4 * _1K;
2340 }
2341 }
2342 if (cb > 0)
2343 {
2344 memcpy(pszDescriptor + offDescriptor, psz, cb);
2345 offDescriptor += cb;
2346 }
2347 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2348 offDescriptor++;
2349 }
2350 if (RT_SUCCESS(rc))
2351 {
2352 *ppvData = pszDescriptor;
2353 *pcbData = offDescriptor;
2354 }
2355 else if (pszDescriptor)
2356 RTMemFree(pszDescriptor);
2357 return rc;
2358}
2359/**
2360 * Internal: write/update the descriptor part of the image.
2361 */
2362static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2363{
2364 int rc = VINF_SUCCESS;
2365 uint64_t cbLimit;
2366 uint64_t uOffset;
2367 PVMDKFILE pDescFile;
2368 void *pvDescriptor = NULL;
2369 size_t cbDescriptor;
2370 if (pImage->pDescData)
2371 {
2372 /* Separate descriptor file. */
2373 uOffset = 0;
2374 cbLimit = 0;
2375 pDescFile = pImage->pFile;
2376 }
2377 else
2378 {
2379 /* Embedded descriptor file. */
2380 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2381 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2382 pDescFile = pImage->pExtents[0].pFile;
2383 }
2384 /* Bail out if there is no file to write to. */
2385 if (pDescFile == NULL)
2386 return VERR_INVALID_PARAMETER;
2387 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2388 if (RT_SUCCESS(rc))
2389 {
2390 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2391 uOffset, pvDescriptor,
2392 cbLimit ? cbLimit : cbDescriptor,
2393 pIoCtx, NULL, NULL);
2394 if ( RT_FAILURE(rc)
2395 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2396 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2397 }
2398 if (RT_SUCCESS(rc) && !cbLimit)
2399 {
2400 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2401 if (RT_FAILURE(rc))
2402 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2403 }
2404 if (RT_SUCCESS(rc))
2405 pImage->Descriptor.fDirty = false;
2406 if (pvDescriptor)
2407 RTMemFree(pvDescriptor);
2408 return rc;
2409}
2410/**
2411 * Internal: validate the consistency check values in a binary header.
2412 */
2413static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2414{
2415 int rc = VINF_SUCCESS;
2416 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2417 {
2418 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2419 return rc;
2420 }
2421 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2422 {
2423 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2424 return rc;
2425 }
2426 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2427 && ( pHeader->singleEndLineChar != '\n'
2428 || pHeader->nonEndLineChar != ' '
2429 || pHeader->doubleEndLineChar1 != '\r'
2430 || pHeader->doubleEndLineChar2 != '\n') )
2431 {
2432 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2433 return rc;
2434 }
2435 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2436 {
2437 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds in '%s' (%llu vs %llu)"),
2438 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2439 return rc;
2440 }
2441 return rc;
2442}
2443/**
2444 * Internal: read metadata belonging to an extent with binary header, i.e.
2445 * as found in monolithic files.
2446 */
2447static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2448 bool fMagicAlreadyRead)
2449{
2450 SparseExtentHeader Header;
2451 int rc;
2452 if (!fMagicAlreadyRead)
2453 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2454 &Header, sizeof(Header));
2455 else
2456 {
2457 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2458 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2459 RT_UOFFSETOF(SparseExtentHeader, version),
2460 &Header.version,
2461 sizeof(Header)
2462 - RT_UOFFSETOF(SparseExtentHeader, version));
2463 }
2464 if (RT_SUCCESS(rc))
2465 {
2466 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2467 if (RT_SUCCESS(rc))
2468 {
2469 uint64_t cbFile = 0;
2470 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2471 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2472 pExtent->fFooter = true;
2473 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2474 || ( pExtent->fFooter
2475 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2476 {
2477 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2478 if (RT_FAILURE(rc))
2479 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2480 }
2481 if (RT_SUCCESS(rc))
2482 {
2483 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2484 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2485 if ( pExtent->fFooter
2486 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2487 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2488 {
2489 /* Read the footer, which comes before the end-of-stream marker. */
2490 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2491 cbFile - 2*512, &Header,
2492 sizeof(Header));
2493 if (RT_FAILURE(rc))
2494 {
2495 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2496 rc = VERR_VD_VMDK_INVALID_HEADER;
2497 }
2498 if (RT_SUCCESS(rc))
2499 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2500 /* Prohibit any writes to this extent. */
2501 pExtent->uAppendPosition = 0;
2502 }
2503 if (RT_SUCCESS(rc))
2504 {
2505 pExtent->uVersion = RT_LE2H_U32(Header.version);
2506 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2507 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2508 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2509 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2510 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2511 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2512 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2513 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2514 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2515 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2516 {
2517 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2518 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2519 }
2520 else
2521 {
2522 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2523 pExtent->uSectorRGD = 0;
2524 }
2525 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2526 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2527 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2528 if ( RT_SUCCESS(rc)
2529 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2530 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2531 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2532 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2533 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2534 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2535 if (RT_SUCCESS(rc))
2536 {
2537 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2538 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2539 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2540 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2541 else
2542 {
2543 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2544 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2545 /* Fix up the number of descriptor sectors, as some flat images have
2546 * really just one, and this causes failures when inserting the UUID
2547 * values and other extra information. */
2548 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2549 {
2550 /* Do it the easy way - just fix it for flat images which have no
2551 * other complicated metadata which needs space too. */
2552 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2553 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2554 pExtent->cDescriptorSectors = 4;
2555 }
2556 }
2557 }
2558 }
2559 }
2560 }
2561 }
2562 else
2563 {
2564 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2565 rc = VERR_VD_VMDK_INVALID_HEADER;
2566 }
2567 if (RT_FAILURE(rc))
2568 vmdkFreeExtentData(pImage, pExtent, false);
2569 return rc;
2570}
2571/**
2572 * Internal: read additional metadata belonging to an extent. For those
2573 * extents which have no additional metadata just verify the information.
2574 */
2575static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2576{
2577 int rc = VINF_SUCCESS;
2578/* Disabled the check as there are too many truncated VMDK images out there. */
2579#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2580 uint64_t cbExtentSize;
2581 /* The image must be a multiple of a sector in size and contain the data
2582 * area (flat images only). If not, it means the image is at least
2583 * truncated, or even seriously garbled. */
2584 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2585 if (RT_FAILURE(rc))
2586 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2587 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2588 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2589 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2590 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2591#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2592 if ( RT_SUCCESS(rc)
2593 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2594 {
2595 /* The spec says that this must be a power of two and greater than 8,
2596 * but probably they meant not less than 8. */
2597 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2598 || pExtent->cSectorsPerGrain < 8)
2599 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2600 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2601 else
2602 {
2603 /* This code requires that a grain table hold a power-of-two multiple
2604 * of the number of entries per GT cache entry. */
2605 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2606 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2607 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2608 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2609 else
2610 {
2611 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2612 if (RT_SUCCESS(rc))
2613 {
2614 /* Prohibit any writes to this streamOptimized extent. */
2615 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2616 pExtent->uAppendPosition = 0;
2617 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2618 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2619 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2620 rc = vmdkReadGrainDirectory(pImage, pExtent);
2621 else
2622 {
2623 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2624 pExtent->cbGrainStreamRead = 0;
2625 }
2626 }
2627 }
2628 }
2629 }
2630 if (RT_FAILURE(rc))
2631 vmdkFreeExtentData(pImage, pExtent, false);
2632 return rc;
2633}
2634/**
2635 * Internal: write/update the metadata for a sparse extent.
2636 */
2637static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2638 uint64_t uOffset, PVDIOCTX pIoCtx)
2639{
2640 SparseExtentHeader Header;
2641 memset(&Header, '\0', sizeof(Header));
2642 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2643 Header.version = RT_H2LE_U32(pExtent->uVersion);
2644 Header.flags = RT_H2LE_U32(RT_BIT(0));
2645 if (pExtent->pRGD)
2646 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2647 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2648 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2649 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2650 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2651 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2652 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2653 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2654 if (pExtent->fFooter && uOffset == 0)
2655 {
2656 if (pExtent->pRGD)
2657 {
2658 Assert(pExtent->uSectorRGD);
2659 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2660 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2661 }
2662 else
2663 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2664 }
2665 else
2666 {
2667 if (pExtent->pRGD)
2668 {
2669 Assert(pExtent->uSectorRGD);
2670 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2671 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2672 }
2673 else
2674 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2675 }
2676 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2677 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2678 Header.singleEndLineChar = '\n';
2679 Header.nonEndLineChar = ' ';
2680 Header.doubleEndLineChar1 = '\r';
2681 Header.doubleEndLineChar2 = '\n';
2682 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2683 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2684 uOffset, &Header, sizeof(Header),
2685 pIoCtx, NULL, NULL);
2686 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2687 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2688 return rc;
2689}
2690/**
2691 * Internal: free the buffers used for streamOptimized images.
2692 */
2693static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2694{
2695 if (pExtent->pvCompGrain)
2696 {
2697 RTMemFree(pExtent->pvCompGrain);
2698 pExtent->pvCompGrain = NULL;
2699 }
2700 if (pExtent->pvGrain)
2701 {
2702 RTMemFree(pExtent->pvGrain);
2703 pExtent->pvGrain = NULL;
2704 }
2705}
2706/**
2707 * Internal: free the memory used by the extent data structure, optionally
2708 * deleting the referenced files.
2709 *
2710 * @returns VBox status code.
2711 * @param pImage Pointer to the image instance data.
2712 * @param pExtent The extent to free.
2713 * @param fDelete Flag whether to delete the backing storage.
2714 */
2715static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2716 bool fDelete)
2717{
2718 int rc = VINF_SUCCESS;
2719 vmdkFreeGrainDirectory(pExtent);
2720 if (pExtent->pDescData)
2721 {
2722 RTMemFree(pExtent->pDescData);
2723 pExtent->pDescData = NULL;
2724 }
2725 if (pExtent->pFile != NULL)
2726 {
2727 /* Do not delete raw extents, these have full and base names equal. */
2728 rc = vmdkFileClose(pImage, &pExtent->pFile,
2729 fDelete
2730 && pExtent->pszFullname
2731 && pExtent->pszBasename
2732 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2733 }
2734 if (pExtent->pszBasename)
2735 {
2736 RTMemTmpFree((void *)pExtent->pszBasename);
2737 pExtent->pszBasename = NULL;
2738 }
2739 if (pExtent->pszFullname)
2740 {
2741 RTStrFree((char *)(void *)pExtent->pszFullname);
2742 pExtent->pszFullname = NULL;
2743 }
2744 vmdkFreeStreamBuffers(pExtent);
2745 return rc;
2746}
2747/**
2748 * Internal: allocate grain table cache if necessary for this image.
2749 */
2750static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2751{
2752 PVMDKEXTENT pExtent;
2753 /* Allocate grain table cache if any sparse extent is present. */
2754 for (unsigned i = 0; i < pImage->cExtents; i++)
2755 {
2756 pExtent = &pImage->pExtents[i];
2757 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2758 {
2759 /* Allocate grain table cache. */
2760 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2761 if (!pImage->pGTCache)
2762 return VERR_NO_MEMORY;
2763 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2764 {
2765 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2766 pGCE->uExtent = UINT32_MAX;
2767 }
2768 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2769 break;
2770 }
2771 }
2772 return VINF_SUCCESS;
2773}
2774/**
2775 * Internal: allocate the given number of extents.
2776 */
2777static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2778{
2779 int rc = VINF_SUCCESS;
2780 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2781 if (pExtents)
2782 {
2783 for (unsigned i = 0; i < cExtents; i++)
2784 {
2785 pExtents[i].pFile = NULL;
2786 pExtents[i].pszBasename = NULL;
2787 pExtents[i].pszFullname = NULL;
2788 pExtents[i].pGD = NULL;
2789 pExtents[i].pRGD = NULL;
2790 pExtents[i].pDescData = NULL;
2791 pExtents[i].uVersion = 1;
2792 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2793 pExtents[i].uExtent = i;
2794 pExtents[i].pImage = pImage;
2795 }
2796 pImage->pExtents = pExtents;
2797 pImage->cExtents = cExtents;
2798 }
2799 else
2800 rc = VERR_NO_MEMORY;
2801 return rc;
2802}
2803/**
2804 * Reads and processes the descriptor embedded in sparse images.
2805 *
2806 * @returns VBox status code.
2807 * @param pImage VMDK image instance.
2808 * @param pFile The sparse file handle.
2809 */
2810static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
2811{
2812 /* It's a hosted single-extent image. */
2813 int rc = vmdkCreateExtents(pImage, 1);
2814 if (RT_SUCCESS(rc))
2815 {
2816 /* The opened file is passed to the extent. No separate descriptor
2817 * file, so no need to keep anything open for the image. */
2818 PVMDKEXTENT pExtent = &pImage->pExtents[0];
2819 pExtent->pFile = pFile;
2820 pImage->pFile = NULL;
2821 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
2822 if (RT_LIKELY(pExtent->pszFullname))
2823 {
2824 /* As we're dealing with a monolithic image here, there must
2825 * be a descriptor embedded in the image file. */
2826 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
2827 if ( RT_SUCCESS(rc)
2828 && pExtent->uDescriptorSector
2829 && pExtent->cDescriptorSectors)
2830 {
2831 /* HACK: extend the descriptor if it is unusually small and it fits in
2832 * the unused space after the image header. Allows opening VMDK files
2833 * with an extremely small descriptor in read/write mode.
2834 *
2835 * The previous version introduced a possible regression for VMDK stream
2836 * optimized images from VMware, which tend to have only a single-sector-sized
2837 * descriptor. Increasing the descriptor size added the various UUID
2838 * entries required to make it work with VBox, but for stream optimized images
2839 * the updated binary header wasn't written to disk, creating a mismatch
2840 * between the advertised and the real descriptor size.
2841 *
2842 * The descriptor size is now increased even if the image is opened read-only,
2843 * provided there is enough room, but the new value is not written back to the image.
2844 */
2845 if ( pExtent->cDescriptorSectors < 3
2846 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
2847 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
2848 {
2849 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
2850 pExtent->cDescriptorSectors = 4;
2851 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2852 {
2853 /*
2854 * Update the on disk number now to make sure we don't introduce inconsistencies
2855 * in case of stream optimized images from VMware where the descriptor is just
2856 * one sector big (the binary header is not written to disk for complete
2857 * stream optimized images in vmdkFlushImage()).
2858 */
2859 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
2860 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
2861 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
2862 &u64DescSizeNew, sizeof(u64DescSizeNew));
2863 if (RT_FAILURE(rc))
2864 {
2865 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
2866 /* Restore the old size and carry on. */
2867 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
2868 }
2869 }
2870 }
2871 /* Read the descriptor from the extent. */
2872 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2873 if (RT_LIKELY(pExtent->pDescData))
2874 {
2875 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2876 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
2877 pExtent->pDescData,
2878 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2879 if (RT_SUCCESS(rc))
2880 {
2881 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
2882 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2883 if ( RT_SUCCESS(rc)
2884 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2885 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
2886 {
2887 rc = vmdkReadMetaExtent(pImage, pExtent);
2888 if (RT_SUCCESS(rc))
2889 {
2890 /* Mark the extent as unclean if opened in read-write mode. */
2891 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2892 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
2893 {
2894 pExtent->fUncleanShutdown = true;
2895 pExtent->fMetaDirty = true;
2896 }
2897 }
2898 }
2899 else if (RT_SUCCESS(rc))
2900 rc = VERR_NOT_SUPPORTED;
2901 }
2902 else
2903 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
2904 }
2905 else
2906 rc = VERR_NO_MEMORY;
2907 }
2908 else if (RT_SUCCESS(rc))
2909 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
2910 }
2911 else
2912 rc = VERR_NO_MEMORY;
2913 }
2914 return rc;
2915}
2916/**
2917 * Reads the descriptor from a pure text file.
2918 *
2919 * @returns VBox status code.
2920 * @param pImage VMDK image instance.
2921 * @param pFile The descriptor file handle.
2922 */
2923static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
2924{
2925 /* Allocate at least 10K, and make sure that there is 5K of free space
2926 * in case new entries need to be added to the descriptor. Never
2927 * allocate more than 128K, because no valid descriptor file is that big
2928 * and reading more would result in the correct "truncated read" error handling. */
2929 uint64_t cbFileSize;
2930 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
2931 if ( RT_SUCCESS(rc)
2932 && cbFileSize >= 50)
2933 {
2934 uint64_t cbSize = cbFileSize;
2935 if (cbSize % VMDK_SECTOR2BYTE(10))
2936 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
2937 else
2938 cbSize += VMDK_SECTOR2BYTE(10);
2939 cbSize = RT_MIN(cbSize, _128K);
2940 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
2941 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
2942 if (RT_LIKELY(pImage->pDescData))
2943 {
2944 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
2945 RT_MIN(pImage->cbDescAlloc, cbFileSize));
2946 if (RT_SUCCESS(rc))
2947 {
2948#if 0 /** @todo Revisit */
2949 cbRead += sizeof(u32Magic);
2950 if (cbRead == pImage->cbDescAlloc)
2951 {
2952 /* Likely the read is truncated. Better fail a bit too early
2953 * (normally the descriptor is much smaller than our buffer). */
2954 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
2955 goto out;
2956 }
2957#endif
2958 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
2959 pImage->cbDescAlloc);
2960 if (RT_SUCCESS(rc))
2961 {
2962 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
2963 {
2964 PVMDKEXTENT pExtent = &pImage->pExtents[i];
2965 if (pExtent->pszBasename)
2966 {
2967 /* Hack to figure out whether the specified name in the
2968 * extent descriptor is absolute. Doesn't always work, but
2969 * should be good enough for now. */
2970 char *pszFullname;
2971 /** @todo implement proper path absolute check. */
2972 if (pExtent->pszBasename[0] == RTPATH_SLASH)
2973 {
2974 pszFullname = RTStrDup(pExtent->pszBasename);
2975 if (!pszFullname)
2976 {
2977 rc = VERR_NO_MEMORY;
2978 break;
2979 }
2980 }
2981 else
2982 {
2983 char *pszDirname = RTStrDup(pImage->pszFilename);
2984 if (!pszDirname)
2985 {
2986 rc = VERR_NO_MEMORY;
2987 break;
2988 }
2989 RTPathStripFilename(pszDirname);
2990 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
2991 RTStrFree(pszDirname);
2992 if (!pszFullname)
2993 {
2994 rc = VERR_NO_STR_MEMORY;
2995 break;
2996 }
2997 }
2998 pExtent->pszFullname = pszFullname;
2999 }
3000 else
3001 pExtent->pszFullname = NULL;
3002 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3003 switch (pExtent->enmType)
3004 {
3005 case VMDKETYPE_HOSTED_SPARSE:
3006 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3007 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3008 if (RT_FAILURE(rc))
3009 {
3010 /* Do NOT signal an appropriate error here, as the VD
3011 * layer has the choice of retrying the open if it
3012 * failed. */
3013 break;
3014 }
3015 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3016 false /* fMagicAlreadyRead */);
3017 if (RT_FAILURE(rc))
3018 break;
3019 rc = vmdkReadMetaExtent(pImage, pExtent);
3020 if (RT_FAILURE(rc))
3021 break;
3022 /* Mark extent as unclean if opened in read-write mode. */
3023 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3024 {
3025 pExtent->fUncleanShutdown = true;
3026 pExtent->fMetaDirty = true;
3027 }
3028 break;
3029 case VMDKETYPE_VMFS:
3030 case VMDKETYPE_FLAT:
3031 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3032 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3033 if (RT_FAILURE(rc))
3034 {
3035 /* Do NOT signal an appropriate error here, as the VD
3036 * layer has the choice of retrying the open if it
3037 * failed. */
3038 break;
3039 }
3040 break;
3041 case VMDKETYPE_ZERO:
3042 /* Nothing to do. */
3043 break;
3044 default:
3045 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3046 }
3047 }
3048 }
3049 }
3050 else
3051 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3052 }
3053 else
3054 rc = VERR_NO_MEMORY;
3055 }
3056 else if (RT_SUCCESS(rc))
3057 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3058 return rc;
3059}
3060/**
3061 * Read and process the descriptor based on the image type.
3062 *
3063 * @returns VBox status code.
3064 * @param pImage VMDK image instance.
3065 * @param pFile VMDK file handle.
3066 */
3067static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3068{
3069 uint32_t u32Magic;
3070 /* Read magic (if present). */
3071 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3072 &u32Magic, sizeof(u32Magic));
3073 if (RT_SUCCESS(rc))
3074 {
3075 /* Handle the file according to its magic number. */
3076 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3077 rc = vmdkDescriptorReadSparse(pImage, pFile);
3078 else
3079 rc = vmdkDescriptorReadAscii(pImage, pFile);
3080 }
3081 else
3082 {
3083 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3084 rc = VERR_VD_VMDK_INVALID_HEADER;
3085 }
3086 return rc;
3087}
3088/**
3089 * Internal: Open an image, constructing all necessary data structures.
3090 */
3091static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3092{
3093 pImage->uOpenFlags = uOpenFlags;
3094 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3095 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3096 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3097 /*
3098 * Open the image.
3099 * We don't have to check for asynchronous access because
3100 * we only support raw access and the opened file is a descriptor
3101 * file where no data is stored.
3102 */
3103 PVMDKFILE pFile;
3104 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3105 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3106 if (RT_SUCCESS(rc))
3107 {
3108 pImage->pFile = pFile;
3109 rc = vmdkDescriptorRead(pImage, pFile);
3110 if (RT_SUCCESS(rc))
3111 {
3112 /* Determine PCHS geometry if not set. */
3113 if (pImage->PCHSGeometry.cCylinders == 0)
3114 {
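                /* Derive the cylinder count from the image size and cap it at 16383,
                   the traditional ATA/BIOS CHS limit. */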
3115 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3116 / pImage->PCHSGeometry.cHeads
3117 / pImage->PCHSGeometry.cSectors;
3118 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3119 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3120 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3121 {
3122 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3123 AssertRC(rc);
3124 }
3125 }
3126 /* Update the image metadata now in case it has changed. */
3127 rc = vmdkFlushImage(pImage, NULL);
3128 if (RT_SUCCESS(rc))
3129 {
3130 /* Figure out a few per-image constants from the extents. */
3131 pImage->cbSize = 0;
3132 for (unsigned i = 0; i < pImage->cExtents; i++)
3133 {
3134 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3135 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3136 {
3137 /* There used to be a check here whether the nominal size of an extent
3138 * is a multiple of the grain size. The spec says that this is
3139 * always the case, but unfortunately some files out there in the
3140 * wild violate the spec (e.g. ReactOS 0.3.1). */
3141 }
3142 else if ( pExtent->enmType == VMDKETYPE_FLAT
3143 || pExtent->enmType == VMDKETYPE_ZERO)
3144 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3145 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3146 }
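                /* The grain table cache is only needed for random access; a stream-optimized
                   image opened read-only for purely sequential access can do without it. */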
3147 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3148 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3149 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3150 rc = vmdkAllocateGrainTableCache(pImage);
3151 }
3152 }
3153 }
3154 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3155 * choice of retrying the open if it failed. */
3156 if (RT_SUCCESS(rc))
3157 {
3158 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3159 pImage->RegionList.fFlags = 0;
3160 pImage->RegionList.cRegions = 1;
3161 pRegion->offRegion = 0; /* Disk start. */
3162 pRegion->cbBlock = 512;
3163 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3164 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3165 pRegion->cbData = 512;
3166 pRegion->cbMetadata = 0;
3167 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3168 }
3169 else
3170 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3171 return rc;
3172}
3173/**
3174 * Frees a raw descriptor.
3175 * @internal
3176 */
3177static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3178{
3179 if (!pRawDesc)
3180 return VINF_SUCCESS;
3181 RTStrFree(pRawDesc->pszRawDisk);
3182 pRawDesc->pszRawDisk = NULL;
3183 /* Partitions: */
3184 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3185 {
3186 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3187 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3188 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3189 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3190 }
3191 RTMemFree(pRawDesc->pPartDescs);
3192 pRawDesc->pPartDescs = NULL;
3193 RTMemFree(pRawDesc);
3194 return VINF_SUCCESS;
3195}
3196/**
3197 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3198 * returning the pointer to the first new entry.
3199 * @internal
3200 */
3201static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3202{
3203 uint32_t const cOld = pRawDesc->cPartDescs;
3204 uint32_t const cNew = cOld + cToAdd;
3205 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3206 cOld * sizeof(pRawDesc->pPartDescs[0]),
3207 cNew * sizeof(pRawDesc->pPartDescs[0]));
3208 if (paNew)
3209 {
3210 pRawDesc->cPartDescs = cNew;
3211 pRawDesc->pPartDescs = paNew;
3212 *ppRet = &paNew[cOld];
3213 return VINF_SUCCESS;
3214 }
3215 *ppRet = NULL;
3216 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3217 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3218 pImage->pszFilename, cOld, cNew);
3219}
3220/**
3221 * @callback_method_impl{FNRTSORTCMP}
3222 */
3223static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3224{
3225 RT_NOREF(pvUser);
3226 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3227 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3228}
3229/**
3230 * Post processes the partition descriptors.
3231 *
3232 * Sorts them and checks that they don't overlap.
3233 */
3234static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3235{
3236 /*
3237 * Sort data areas in ascending order of start.
3238 */
3239 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3240 /*
3241 * Check that we don't have overlapping descriptors. If we do, that's an
3242 * indication that the drive is corrupt or that the RTDvm code is buggy.
3243 */
3244 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3245 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3246 {
3247 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3248 if (offLast <= paPartDescs[i].offStartInVDisk)
3249 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3250 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3251 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3252 paPartDescs[i].pvPartitionData ? " (data)" : "");
3253 offLast -= 1;
3254 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3255 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3256 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3257 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3258 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3259 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3260 if (offLast >= cbSize)
3261 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3262 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3263 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3264 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3265 }
3266 return VINF_SUCCESS;
3267}
3268#ifdef RT_OS_LINUX
3269/**
3270 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3271 * 'dev' file matching @a uDevToLocate.
3272 *
3273 * This is used both for finding the drive's directory under /sys/block/ and the partition's directory beneath it.
3274 *
3275 * @returns IPRT status code, errors have been reported properly.
3276 * @param pImage For error reporting.
3277 * @param pszBlockDevDir Input: Path to the directory to search under.
3278 * Output: Path to the directory containing information
3279 * for @a uDevToLocate.
3280 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3281 * @param uDevToLocate The device number of the block device info dir to
3282 * locate.
3283 * @param pszDevToLocate For error reporting.
3284 */
3285static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3286 dev_t uDevToLocate, const char *pszDevToLocate)
3287{
3288 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3289 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3290 RTDIR hDir = NIL_RTDIR;
3291 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3292 if (RT_SUCCESS(rc))
3293 {
3294 for (;;)
3295 {
3296 RTDIRENTRY Entry;
3297 rc = RTDirRead(hDir, &Entry, NULL);
3298 if (RT_SUCCESS(rc))
3299 {
3300 /* We're interested in directories and symlinks. */
3301 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3302 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3303 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3304 {
3305 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3306 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
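                    /* Start with a value that cannot match, so a failed sysfs read below is harmless. */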
3307 dev_t uThisDevNo = ~uDevToLocate;
3308 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3309 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3310 break;
3311 }
3312 }
3313 else
3314 {
3315 pszBlockDevDir[cchDir] = '\0';
3316 if (rc == VERR_NO_MORE_FILES)
3317 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3318 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3319 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3320 else
3321 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3322 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3323 pImage->pszFilename, pszBlockDevDir, rc);
3324 break;
3325 }
3326 }
3327 RTDirClose(hDir);
3328 }
3329 else
3330 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3331 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3332 pImage->pszFilename, pszBlockDevDir, rc);
3333 return rc;
3334}
3335#endif /* RT_OS_LINUX */
3336#ifdef RT_OS_FREEBSD
3337/**
3338 * Reads the config data from the provider and returns offset and size
3339 *
3340 * @return IPRT status code
3341 * @param pProvider GEOM provider representing partition
3342 * @param pcbOffset Placeholder for the offset of the partition
3343 * @param pcbSize Placeholder for the size of the partition
3344 */
3345static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3346{
3347 gconfig *pConfEntry;
3348 int rc = VERR_NOT_FOUND;
3349 /*
3350 * Required parameters are located in the list containing key/value pairs.
3351 * Both key and value are in text form. The manual says nothing about whether
3352 * both parameters are guaranteed to be present in the list, so there are
3353 * cases where only one of them is. To handle such cases we treat absent
3354 * parameters as zero, leaving it to the caller to decide whether that is
3355 * correct or an error.
3356 */
3357 uint64_t cbOffset = 0;
3358 uint64_t cbSize = 0;
3359 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3360 {
3361 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3362 {
3363 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3364 rc = VINF_SUCCESS;
3365 }
3366 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3367 {
3368 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3369 rc = VINF_SUCCESS;
3370 }
3371 }
3372 if (RT_SUCCESS(rc))
3373 {
3374 *pcbOffset = cbOffset;
3375 *pcbSize = cbSize;
3376 }
3377 return rc;
3378}
3379/**
3380 * Searches the partition specified by name and calculates its size and absolute offset.
3381 *
3382 * @return IPRT status code.
3383 * @param pParentClass Class containing pParentGeom
3384 * @param pszParentGeomName Name of the parent geom where we are looking for the provider
3385 * @param pszProviderName Name of the provider we are looking for
3386 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3387 * @param pcbSize Placeholder for the size of the partition.
3388 */
3389static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3390 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3391{
3392 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3393 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3394 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3395 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3396 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3397 ggeom *pParentGeom;
3398 int rc = VERR_NOT_FOUND;
3399 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3400 {
3401 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3402 {
3403 rc = VINF_SUCCESS;
3404 break;
3405 }
3406 }
3407 if (RT_FAILURE(rc))
3408 return rc;
3409 gprovider *pProvider;
3410 /*
3411 * First, go over the providers without handling EBR or BSDLabel
3412 * partitions, covering the case where the provider we are looking for
3413 * is a direct child of the given geom; this keeps the search short.
3414 */
3415 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3416 {
3417 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3418 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3419 }
3420 /*
3421 * No provider found. Go over the parent geom again and recurse
3422 * if a provider represents an EBR or BSDLabel. In that case the
3423 * given parent geom contains only the EBR or BSDLabel partition
3424 * itself, and its own partitions live in separate geoms. Also,
3425 * partition offsets are relative to their geom, so we have to add
3426 * the offset of the child provider to that of the parent geom's
3427 * provider.
3428 */
3429 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3430 {
3431 uint64_t cbOffset = 0;
3432 uint64_t cbSize = 0;
3433 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3434 if (RT_FAILURE(rc))
3435 return rc;
3436 uint64_t cbProviderOffset = 0;
3437 uint64_t cbProviderSize = 0;
3438 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3439 if (RT_SUCCESS(rc))
3440 {
3441 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3442 *pcbSize = cbProviderSize;
3443 return rc;
3444 }
3445 }
3446 return VERR_NOT_FOUND;
3447}
3448#endif
3449/**
3450 * Attempts to verify the raw partition path.
3451 *
3452 * We don't want to trust RTDvm and the partition device node morphing blindly.
3453 */
3454static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3455 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3456{
3457 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3458 /*
3459 * Try open the raw partition device.
3460 */
3461 RTFILE hRawPart = NIL_RTFILE;
3462 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3463 if (RT_FAILURE(rc))
3464 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3465 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3466 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3467 /*
3468 * Compare the partition UUID if we can get it.
3469 */
3470#ifdef RT_OS_WINDOWS
3471 DWORD cbReturned;
3472 /* 1. Get the device numbers for both handles, they should have the same disk. */
3473 STORAGE_DEVICE_NUMBER DevNum1;
3474 RT_ZERO(DevNum1);
3475 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3476 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3477 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3478 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3479 pImage->pszFilename, pszRawDrive, GetLastError());
3480 STORAGE_DEVICE_NUMBER DevNum2;
3481 RT_ZERO(DevNum2);
3482 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3483 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3484 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3485 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3486 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3487 if ( RT_SUCCESS(rc)
3488 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3489 || DevNum1.DeviceType != DevNum2.DeviceType))
3490 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3491 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3492 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3493 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3494 if (RT_SUCCESS(rc))
3495 {
3496 /* Get the partitions from the raw drive and match up with the volume info
3497 from RTDvm. The partition number is found in DevNum2. */
3498 DWORD cbNeeded = 0;
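            /* Ask for the required layout buffer size; if the query doesn't yield a usable
               value, fall back to room for 64 partition entries (plus some slack below). */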
3499 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3500 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3501 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3502 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3503 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3504 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3505 if (pLayout)
3506 {
3507 cbReturned = 0;
3508 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3509 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3510 {
3511 /* Find the entry with the given partition number (it's not an index; the array contains empty MBR entries, etc.). */
3512 unsigned iEntry = 0;
3513 while ( iEntry < pLayout->PartitionCount
3514 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3515 iEntry++;
3516 if (iEntry < pLayout->PartitionCount)
3517 {
3518 /* Compare the basics */
3519 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3520 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3521 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3522 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3523 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3524 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3525 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3526 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3527 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3528 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3529 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3530 /** @todo We could compare the MBR type, GPT type and ID. */
3531 RT_NOREF(hVol);
3532 }
3533 else
3534 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3535 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3536 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3537 DevNum2.PartitionNumber, pLayout->PartitionCount);
3538# ifndef LOG_ENABLED
3539 if (RT_FAILURE(rc))
3540# endif
3541 {
3542 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3543 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3544 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3545 {
3546 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3547 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3548 pEntry->PartitionStyle, pEntry->RewritePartition));
3549 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3550 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3551 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3552 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3553 LogRel((" type=%RTuuid id=%RTuuid attrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3554 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3555 else
3556 LogRel(("\n"));
3557 }
3558 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3559 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3560 }
3561 }
3562 else
3563 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3564 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3565 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3566 RTMemTmpFree(pLayout);
3567 }
3568 else
3569 rc = VERR_NO_TMP_MEMORY;
3570 }
3571#elif defined(RT_OS_LINUX)
3572 RT_NOREF(hVol);
3573 /* Stat the two devices first to get their device numbers. (We probably
3574 could make some assumptions here about the major & minor number assignments
3575 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
3576 struct stat StDrive, StPart;
3577 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3578 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3579 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3580 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
3581 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3582 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
3583 else
3584 {
3585 /* Scan the directories immediately under /sys/block/ for one with a
3586 'dev' file matching the drive's device number: */
3587 char szSysPath[RTPATH_MAX];
3588 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
3589 AssertRCReturn(rc, rc); /* this shall not fail */
3590 if (RTDirExists(szSysPath))
3591 {
3592 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
3593 /* Now, scan the directories under that again for a partition device
3594 matching the hRawPart device's number: */
3595 if (RT_SUCCESS(rc))
3596 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
3597 /* Having found the /sys/block/device/partition/ path, we can finally
3598 read the partition attributes and compare with hVol. */
3599 if (RT_SUCCESS(rc))
3600 {
3601 /* partition number: */
3602 int64_t iLnxPartition = 0;
3603 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
3604 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
3605 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3606 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
3607 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
3608 /* else: ignore failure? */
3609 /* start offset: */
3610 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
3611 if (RT_SUCCESS(rc))
3612 {
3613 int64_t offLnxStart = -1;
3614 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
3615 offLnxStart *= cbLnxSector;
3616 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
3617 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3618 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3619 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
3620 /* else: ignore failure? */
3621 }
3622 /* the size: */
3623 if (RT_SUCCESS(rc))
3624 {
3625 int64_t cbLnxData = -1;
3626 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
3627 cbLnxData *= cbLnxSector;
3628 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
3629 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3630 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3631 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
3632 /* else: ignore failure? */
3633 }
3634 }
3635 }
3636 /* else: We've got nothing to work on, so only do content comparison. */
3637 }
3638#elif defined(RT_OS_FREEBSD)
3639 char szDriveDevName[256];
3640 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
3641 if (pszDevName == NULL)
3642 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3643 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
3644 char szPartDevName[256];
3645 if (RT_SUCCESS(rc))
3646 {
3647 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
3648 if (pszDevName == NULL)
3649 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3650 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
3651 }
3652 if (RT_SUCCESS(rc))
3653 {
3654 gmesh geomMesh;
3655 int err = geom_gettree(&geomMesh);
3656 if (err == 0)
3657 {
3658 /* Find the root class containing the partition info */
3659 gclass* pPartClass;
3660 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
3661 {
3662 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
3663 break;
3664 }
3665 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
3666 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
3667 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
3668 if (RT_SUCCESS(rc))
3669 {
3670 /* Find provider representing partition device */
3671 uint64_t cbOffset;
3672 uint64_t cbSize;
3673 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
3674 if (RT_SUCCESS(rc))
3675 {
3676 if (cbOffset != pPartDesc->offStartInVDisk)
3677 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3678 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3679 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3680 if (cbSize != pPartDesc->cbData)
3681 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3682 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3683 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3684 }
3685 else
3686 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3687 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
3688 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
3689 }
3690 geom_deletetree(&geomMesh);
3691 }
3692 else
3693 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
3694 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
3695 }
3696#elif defined(RT_OS_SOLARIS)
3697 RT_NOREF(hVol);
3698 dk_cinfo dkiDriveInfo;
3699 dk_cinfo dkiPartInfo;
3700 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
3701 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3702 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3703 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
3704 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3705 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3706 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
3707 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
3708 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
3709 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
3710 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
3711 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3712 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
3713 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3714 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
3715 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
3716 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
3717 else
3718 {
3719 uint64_t cbOffset = 0;
3720 uint64_t cbSize = 0;
3721 dk_gpt *pEfi = NULL;
3722 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
3723 if (idxEfiPart >= 0)
3724 {
3725 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
3726 {
3727 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
3728 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
3729 }
3730 else
3731 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3732 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3733 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3734 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
3735 efi_free(pEfi);
3736 }
3737 else
3738 {
3739 /*
3740 * The manual says efi_alloc_and_read returns VT_EINVAL if no EFI partition table is found.
3741 * In practice the function may return other errors, e.g. VT_ERROR, so we cannot tell whether
3742 * it is a real error or just the absence of an EFI table. Therefore, try to obtain the
3743 * partition info another way. If there is an error, it sets errno, which is handled below.
3744 */
3745 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
3746 if (numPartition > NDKMAP)
3747 numPartition -= NDKMAP;
3748 if (numPartition != idxPartition)
3749 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3750 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3751 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3752 idxPartition, numPartition);
3753 else
3754 {
3755 dk_minfo_ext mediaInfo;
3756 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
3757 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3758 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3759 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3760 else
3761 {
3762 extpart_info extPartInfo;
3763 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
3764 {
3765 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
3766 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
3767 }
3768 else
3769 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3770 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3771 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3772 }
3773 }
3774 }
3775 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
3776 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3777 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3778 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3779 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
3780 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3781 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3782 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3783 }
3784
3785#elif defined(RT_OS_DARWIN)
3786 /* Stat the drive get its device number. */
3787 struct stat StDrive;
3788 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3789 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3790 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
3791 else
3792 {
3793 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
3794 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3795 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
3796 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3797 else
3798 {
3799 uint32_t cbBlockSize = 0;
3800 uint64_t cbOffset = 0;
3801 uint64_t cbSize = 0;
3802 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
3803 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3804 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
3805 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3806 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
3807 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3808 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
3809 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3810 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
3811 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3812 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
3813 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3814 else
3815 {
3816 cbSize *= (uint64_t)cbBlockSize;
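                    /* Query the physical extent backing the whole partition so we can check
                       that it lives on the expected whole-disk device. */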
3817 dk_physical_extent_t dkPartExtent = {0};
3818 dkPartExtent.offset = 0;
3819 dkPartExtent.length = cbSize;
3820 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
3821 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3822 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
3823 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3824 else
3825 {
3826 if (dkPartExtent.dev != StDrive.st_rdev)
3827 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3828 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
3829 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
3830 else if (cbOffset != pPartDesc->offStartInVDisk)
3831 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3832 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3833 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3834 else if (cbSize != pPartDesc->cbData)
3835 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3836 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3837 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3838 }
3839 }
3840
3841 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
3842 {
3843 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3844 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
3845 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3846 if (RT_SUCCESS(rc))
3847 rc = rc2;
3848 }
3849 }
3850 }
3851
3852#else
3853 RT_NOREF(hVol); /* PORTME */
3854#endif
3855 if (RT_SUCCESS(rc))
3856 {
3857 /*
3858 * Compare the first 32 sectors of the partition.
3859 *
3860 * This might not be conclusive, but for partitions formatted with the more
3861 * common file systems it should be as they have a superblock copy at or near
3862 * the start of the partition (fat, fat32, ntfs, and ext4 do, at least).
3863 */
3864 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3865 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3866 if (pbSector1 != NULL)
3867 {
3868 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3869 /* Do the comparison; we repeat if it fails, since the data might be volatile. */
3870 uint64_t uPrevCrc1 = 0;
3871 uint64_t uPrevCrc2 = 0;
3872 uint32_t cStable = 0;
3873 for (unsigned iTry = 0; iTry < 256; iTry++)
3874 {
3875 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3876 if (RT_SUCCESS(rc))
3877 {
3878 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3879 if (RT_SUCCESS(rc))
3880 {
3881 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3882 {
3883 rc = VERR_MISMATCH;
3884 /* Do data stability checks before repeating: */
3885 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
3886 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
3887 if ( uPrevCrc1 != uCrc1
3888 || uPrevCrc2 != uCrc2)
3889 cStable = 0;
3890 else if (++cStable > 4)
3891 break;
3892 uPrevCrc1 = uCrc1;
3893 uPrevCrc2 = uCrc2;
3894 continue;
3895 }
3896 rc = VINF_SUCCESS;
3897 }
3898 else
3899 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3900 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3901 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
3902 }
3903 else
3904 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3905 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3906 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
3907 break;
3908 }
3909 if (rc == VERR_MISMATCH)
3910 {
3911 /* Find the first mismatching bytes: */
3912 size_t offMismatch = 0;
3913 while (offMismatch < cbToCompare && pbSector1[offMismatch] == pbSector2[offMismatch])
3914 offMismatch++;
3915 int cbSample = (int)RT_MIN(cbToCompare - offMismatch, 16);
3916 if (cStable > 0)
3917 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3918 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
3919 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
3920 offMismatch, cbSample, &pbSector1[offMismatch], cbSample, &pbSector2[offMismatch]);
3921 else
3922 {
3923 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
3924 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3925 offMismatch, cbSample, &pbSector1[offMismatch], cbSample, &pbSector2[offMismatch]));
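                        /* Flip the sign so this is reported as a warning rather than a hard error;
                           the unstable data leaves the verification undecided. */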
3926 rc = -rc;
3927 }
3928 }
3929 RTMemTmpFree(pbSector1);
3930 }
3931 else
3932 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
3933 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
3934 pImage->pszFilename, cbToCompare * 2);
3935 }
3936 RTFileClose(hRawPart);
3937 return rc;
3938}
3939#ifdef RT_OS_WINDOWS
3940/**
3941 * Construct the device name for the given partition number.
3942 */
3943static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
3944 char **ppszRawPartition)
3945{
3946 int rc = VINF_SUCCESS;
3947 DWORD cbReturned = 0;
3948 STORAGE_DEVICE_NUMBER DevNum;
3949 RT_ZERO(DevNum);
3950 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3951 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
3952 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
3953 else
3954 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3955 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3956 pImage->pszFilename, pszRawDrive, GetLastError());
3957 return rc;
3958}
3959#endif /* RT_OS_WINDOWS */
3960/**
3961 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
3962 * 'Partitions' configuration value is present.
3963 *
3964 * @returns VBox status code, error message has been set on failure.
3965 *
3966 * @note Caller is assumed to clean up @a pRawDesc and release
3967 * @a *phVolToRelease.
3968 * @internal
3969 */
3970static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
3971 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
3972 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
3973 PRTDVMVOLUME phVolToRelease)
3974{
3975 *phVolToRelease = NIL_RTDVMVOLUME;
3976 /* Check sanity/understanding. */
3977 Assert(fPartitions);
3978 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
3979 /*
3980 * Allocate one descriptor for each volume up front.
3981 */
3982 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
3983 PVDISKRAWPARTDESC paPartDescs = NULL;
3984 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
3985 AssertRCReturn(rc, rc);
3986 /*
3987 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
3988 */
3989 uint32_t fPartitionsLeft = fPartitions;
3990 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
3991 for (uint32_t i = 0; i < cVolumes; i++)
3992 {
3993 /*
3994 * Get the next/first volume and release the current.
3995 */
3996 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
3997 if (i == 0)
3998 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
3999 else
4000 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4001 if (RT_FAILURE(rc))
4002 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4003 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4004 pImage->pszFilename, i, pszRawDrive, rc);
4005 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4006 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4007 *phVolToRelease = hVol = hVolNext;
4008 /*
4009 * Depending on the fPartitions selector and associated read-only mask,
4010 * the guest either gets read-write or read-only access (bits set)
4011 * or no access (selector bit clear, access directed to the VMDK).
4012 */
4013 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4014 uint64_t offVolumeEndIgnored = 0;
4015 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4016 if (RT_FAILURE(rc))
4017 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4018 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4019 pImage->pszFilename, i, pszRawDrive, rc);
4020 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4021 /* Note! The index must match IHostDrivePartition::number. */
4022 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4023 if ( idxPartition < 32
4024 && (fPartitions & RT_BIT_32(idxPartition)))
4025 {
4026 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4027 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4028 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4029 if (!fRelative)
4030 {
4031 /*
4032 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4033 */
4034 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4035 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4036 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4037 }
4038 else
4039 {
4040 /*
4041 * Relative means accessing the partition data via the device node for that
4042 * partition, allowing the sysadmin/OS to grant a user access to individual
4043 * partitions without necessarily being able to compromise the host OS.
4044 * Obviously, the creation of the VMDK requires read access to the main
4045 * device node for the drive, but that's a one-time thing and can be done
4046 * by the sysadmin. Here data starts at offset zero in the device node.
4047 */
4048 paPartDescs[i].offStartInDevice = 0;
4049#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4050 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4051 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4052#elif defined(RT_OS_LINUX)
4053 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4054 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4055 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4056#elif defined(RT_OS_WINDOWS)
4057 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4058 AssertRCReturn(rc, rc);
4059#elif defined(RT_OS_SOLARIS)
4060 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4061 {
4062 /*
4063 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4064 * where X is the controller,
4065 * Y is target (SCSI device number),
4066 * Z is disk number,
4067 * K is partition number,
4068 * where p0 is the whole disk
4069 * p1-pN are the partitions of the disk
4070 */
4071 const char *pszRawDrivePath = pszRawDrive;
4072 char szDrivePath[RTPATH_MAX];
4073 size_t cbRawDrive = strlen(pszRawDrive);
4074 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4075 {
4076 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4077 szDrivePath[cbRawDrive - 2] = '\0';
4078 pszRawDrivePath = szDrivePath;
4079 }
4080 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4081 }
4082 else /* GPT */
4083 {
4084 /*
4085 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4086 * where X is the controller,
4087 * Y is target (SCSI device number),
4088 * Z is disk number,
4089 * K is partition number, zero based. Can be only from 0 to 6.
4090 * Thus, only partitions numbered 0 through 6 have device nodes.
4091 */
4092 if (idxPartition > 7)
4093 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4094 N_("VMDK: Image path: '%s'. The partition #%u on '%s' has no device node and cannot be specified with the 'Relative' property"),
4095 pImage->pszFilename, idxPartition, pszRawDrive);
4096 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4097 }
4098#else
4099 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4100#endif
4101 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4102 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4103 AssertRCReturn(rc, rc);
4104 }
4105 }
4106 else
4107 {
4108 /* Not accessible to the guest. */
4109 paPartDescs[i].offStartInDevice = 0;
4110 paPartDescs[i].pszRawDevice = NULL;
4111 }
4112 } /* for each volume */
4113 RTDvmVolumeRelease(hVol);
4114 *phVolToRelease = NIL_RTDVMVOLUME;
4115 /*
4116 * Check that we found all the partitions the user selected.
4117 */
4118 if (fPartitionsLeft)
4119 {
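        /* Build a comma-separated list of the selected partition numbers that were not found. */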
4120 char szLeft[3 * sizeof(fPartitions) * 8];
4121 size_t cchLeft = 0;
4122 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4123 if (fPartitionsLeft & RT_BIT_32(i))
4124 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? ",%u" : "%u", i);
4125 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4126 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' were found: %s"),
4127 pImage->pszFilename, pszRawDrive, szLeft);
4128 }
4129 return VINF_SUCCESS;
4130}
4131/**
4132 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4133 * of the partition tables and associated padding areas when the 'Partitions'
4134 * configuration value is present.
4135 *
4136 * The guest is not allowed access to the partition tables, however it needs
4137 * them to be able to access the drive. So, create descriptors for each of the
4138 * tables and attach the current disk content. vmdkCreateRawImage() will later
4139 * write the content to the VMDK. Any changes the guest later makes to the
4140 * partition tables will then go to the VMDK copy, rather than the host drive.
4141 *
4142 * @returns VBox status code, error message has been set on failure.
4143 *
4144 * @note Caller is assumed to clean up @a pRawDesc
4145 * @internal
4146 */
4147static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4148 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4149{
4150 /*
4151 * Query the locations.
4152 */
4153 /* Determine how many locations there are: */
4154 size_t cLocations = 0;
4155 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4156 if (rc != VERR_BUFFER_OVERFLOW)
4157 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4158 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4159 pImage->pszFilename, pszRawDrive, rc);
4160 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4161 /* We can allocate the partition descriptors here to save an indentation level. */
4162 PVDISKRAWPARTDESC paPartDescs = NULL;
4163 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4164 AssertRCReturn(rc, rc);
4165 /* Allocate the result table and repeat the location table query: */
4166 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4167 if (!paLocations)
4168 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4169 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4170 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4171 if (RT_SUCCESS(rc))
4172 {
4173 /*
4174 * Translate them into descriptors.
4175 *
4176 * We restrict the amount of partition alignment padding to 4MiB as more
4177 * will just be a waste of space. The use case for including the padding
4178 * is older boot loaders and boot managers (including one by a team member)
4179 * that put data and code in the 62 sectors between the MBR and the first
4180 * partition (total of 63). Later CHS was abandoned and partitions started
4181 * being aligned on power-of-two sector boundaries (typically 64KiB or
4182 * 1MiB depending on the media size).
4183 */
4184 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4185 {
4186 Assert(paLocations[i].cb > 0);
4187 if (paLocations[i].cb <= _64M)
4188 {
4189 /* Create the partition descriptor entry: */
4190 //paPartDescs[i].pszRawDevice = NULL;
4191 //paPartDescs[i].offStartInDevice = 0;
4192 //paPartDescs[i].uFlags = 0;
4193 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4194 paPartDescs[i].cbData = paLocations[i].cb;
4195 if (paPartDescs[i].cbData < _4M)
4196 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4197 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4198 if (paPartDescs[i].pvPartitionData)
4199 {
4200 /* Read the content from the drive: */
4201 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4202 (size_t)paPartDescs[i].cbData, NULL);
4203 if (RT_SUCCESS(rc))
4204 {
4205 /* Do we have custom boot sector code? */
4206 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4207 {
4208 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4209 Instead we fail as we weren't able to do what the user requested us to do.
4210 Better if the user knows than starts questioning why the guest isn't
4211 booting as expected. */
4212 if (cbBootSector <= paPartDescs[i].cbData)
4213 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4214 else
4215 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4216 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4217 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4218 }
4219 }
4220 else
4221 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4222 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4223 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4224 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4225 }
4226 else
4227 rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4228 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4229 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4230 }
4231 else
4232 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4233 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is too big: %RU64 bytes"),
4234 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4235 }
4236 }
4237 else
4238 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4239 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4240 pImage->pszFilename, pszRawDrive, rc);
4241 RTMemFree(paLocations);
4242 return rc;
4243}
4244/**
4245 * Opens the volume manager for the raw drive when in selected-partition mode.
4246 *
4247 * @param pImage The VMDK image (for errors).
4248 * @param hRawDrive The raw drive handle.
4249 * @param pszRawDrive The raw drive device path (for errors).
4250 * @param cbSector The sector size.
4251 * @param phVolMgr Where to return the handle to the volume manager on
4252 * success.
4253 * @returns VBox status code, errors have been reported.
4254 * @internal
4255 */
4256static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4257{
4258 *phVolMgr = NIL_RTDVM;
4259 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4260 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4261 if (RT_FAILURE(rc))
4262 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4263 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4264 pImage->pszFilename, pszRawDrive, rc);
4265 RTDVM hVolMgr = NIL_RTDVM;
4266 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4267 RTVfsFileRelease(hVfsFile);
4268 if (RT_FAILURE(rc))
4269 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4270 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4271 pImage->pszFilename, pszRawDrive, rc);
4272 rc = RTDvmMapOpen(hVolMgr);
4273 if (RT_SUCCESS(rc))
4274 {
4275 *phVolMgr = hVolMgr;
4276 return VINF_SUCCESS;
4277 }
4278 RTDvmRelease(hVolMgr);
4279 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4280 pImage->pszFilename, pszRawDrive, rc);
4281}
4282/**
4283 * Opens the raw drive device and gets the sizes for it.
4284 *
4285 * @param pImage The image (for error reporting).
4286 * @param pszRawDrive The device/whatever to open.
4287 * @param phRawDrive Where to return the file handle.
4288 * @param pcbRawDrive Where to return the size.
4289 * @param pcbSector Where to return the sector size.
4290 * @returns IPRT status code, errors have been reported.
4291 * @internal
4292 */
4293static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4294 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4295{
4296 /*
4297 * Open the device for the raw drive.
4298 */
4299 RTFILE hRawDrive = NIL_RTFILE;
4300 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4301 if (RT_FAILURE(rc))
4302 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4303 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4304 pImage->pszFilename, pszRawDrive, rc);
4305 /*
4306 * Get the sector size.
4307 */
4308 uint32_t cbSector = 0;
4309 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4310 if (RT_SUCCESS(rc))
4311 {
4312 /* sanity checks */
4313 if ( cbSector >= 512
4314 && cbSector <= _64K
4315 && RT_IS_POWER_OF_TWO(cbSector))
4316 {
4317 /*
4318 * Get the size.
4319 */
4320 uint64_t cbRawDrive = 0;
4321 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4322 if (RT_SUCCESS(rc))
4323 {
4324 /* Check whether cbRawDrive is actually sensible. */
4325 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4326 {
4327 *phRawDrive = hRawDrive;
4328 *pcbRawDrive = cbRawDrive;
4329 *pcbSector = cbSector;
4330 return VINF_SUCCESS;
4331 }
4332 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4333 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4334 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4335 }
4336 else
4337 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4338 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4339 pImage->pszFilename, pszRawDrive, rc);
4340 }
4341 else
4342 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4343 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4344 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4345 }
4346 else
4347 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4348 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4349 pImage->pszFilename, pszRawDrive, rc);
4350 RTFileClose(hRawDrive);
4351 return rc;
4352}
4353/**
4354 * Reads the raw disk configuration, leaving initialization and cleanup to the
4355 * caller (regardless of return status).
4356 *
4357 * @returns VBox status code, errors properly reported.
4358 * @internal
4359 */
4360static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4361 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4362 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4363 char **ppszFreeMe)
4364{
4365 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4366 if (!pImgCfg)
4367 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4368 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4369 /*
4370 * RawDrive = path
4371 */
4372 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4373 if (RT_FAILURE(rc))
4374 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4375 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4376 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4377 /*
4378 * Partitions=n[r][,...]
4379 */
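    /* Illustrative example (not from the original source): a value such as
     * "Partitions=1,3r,7" selects partitions 1, 3 and 7 and marks partition 3
     * read-only; the numbers are parsed with RTStrToUInt32Ex using base 0, so
     * hexadecimal values with an 0x prefix are accepted as well. */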
4380 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4381 *pfPartitions = *pfPartitionsReadOnly = 0;
4382 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4383 if (RT_SUCCESS(rc))
4384 {
4385 char *psz = *ppszFreeMe;
4386 while (*psz != '\0')
4387 {
4388 char *pszNext;
4389 uint32_t u32;
4390 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4391 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4392 rc = -rc;
4393 if (RT_FAILURE(rc))
4394 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4395 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4396 pImage->pszFilename, rc, psz);
4397 if (u32 >= cMaxPartitionBits)
4398 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4399 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4400 pImage->pszFilename, u32, cMaxPartitionBits);
4401 *pfPartitions |= RT_BIT_32(u32);
4402 psz = pszNext;
4403 if (*psz == 'r')
4404 {
4405 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4406 psz++;
4407 }
4408 if (*psz == ',')
4409 psz++;
4410 else if (*psz != '\0')
4411 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4412 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4413 pImage->pszFilename, psz);
4414 }
4415 RTStrFree(*ppszFreeMe);
4416 *ppszFreeMe = NULL;
4417 }
4418 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4419 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4420 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4421 /*
4422 * BootSector=base64
4423 */
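    /* Example shape (hypothetical value): BootSector=<base64 of a 512-byte boot
     * sector image>. The decoded bytes are handed to the partition table copy
     * code (vmdkRawDescDoCopyPartitionTables) further down, which is also why
     * the option is rejected below for whole-drive configurations. */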
4424 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4425 if (RT_SUCCESS(rc))
4426 {
4427 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4428 if (cbBootSector < 0)
4429 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4430 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4431 pImage->pszFilename, *ppszRawDrive);
4432 if (cbBootSector == 0)
4433 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4434 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4435 pImage->pszFilename, *ppszRawDrive);
4436 if (cbBootSector > _4M) /* this is just a preliminary max */
4437 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4438 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4439 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4440 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4441 however, bird disagrees and thinks the user should be told that what
4442 he/she/it tries to do isn't possible. There should be less head
4443 scratching this way when the guest doesn't do the expected thing. */
4444 if (!*pfPartitions)
4445 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4446 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4447 pImage->pszFilename, *ppszRawDrive);
4448 *pcbBootSector = (size_t)cbBootSector;
4449 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4450 if (!*ppvBootSector)
4451 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4452 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4453 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4454 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4455 if (RT_FAILURE(rc))
4456 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4457 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4458 pImage->pszFilename, *ppszRawDrive, rc);
4459 RTStrFree(*ppszFreeMe);
4460 *ppszFreeMe = NULL;
4461 }
4462 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4463 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4464 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4465 /*
4466 * Relative=0/1
4467 */
4468 *pfRelative = false;
4469 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4470 if (RT_SUCCESS(rc))
4471 {
4472 if (!*pfPartitions && *pfRelative != false)
4473 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4474 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4475 pImage->pszFilename);
4476#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
4477 if (*pfRelative == true)
4478 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4479 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4480 pImage->pszFilename);
4481#endif
4482 }
4483 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4484 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4485 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4486 else
4487#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4488 *pfRelative = true;
4489#else
4490 *pfRelative = false;
4491#endif
4492 return VINF_SUCCESS;
4493}
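/* Putting the keys together, a typical configuration parsed by this function
 * might look like the following (hypothetical values):
 *   RawDrive   = /dev/sda
 *   Partitions = 1,5r
 *   Relative   = 1
 * i.e. full access to partition 1, read-only access to partition 5, and, on
 * hosts that support it, extents referencing the per-partition device nodes
 * rather than offsets into the whole disk. */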
4494/**
4495 * Creates a raw drive (nee disk) descriptor.
4496 *
4497 * This was originally done in VBoxInternalManage.cpp, but was copied (not moved)
4498 * here much later. That's one of the reasons why we produce a descriptor just
4499 * like that code does, rather than mixing directly into the vmdkCreateRawImage code.
4500 *
4501 * @returns VBox status code.
4502 * @param pImage The image.
4503 * @param ppRaw Where to return the raw drive descriptor. Caller must
4504 * free it using vmdkRawDescFree regardless of the status
4505 * code.
4506 * @internal
4507 */
4508static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4509{
4510 /* Make sure it's NULL. */
4511 *ppRaw = NULL;
4512 /*
4513 * Read the configuration.
4514 */
4515 char *pszRawDrive = NULL;
4516 uint32_t fPartitions = 0; /* zero if whole-drive */
4517 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4518 void *pvBootSector = NULL;
4519 size_t cbBootSector = 0;
4520 bool fRelative = false;
4521 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4522 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4523 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4524 RTStrFree(pszFreeMe);
4525 if (RT_SUCCESS(rc))
4526 {
4527 /*
4528 * Open the device, getting the sector size and drive size.
4529 */
4530 uint64_t cbSize = 0;
4531 uint32_t cbSector = 0;
4532 RTFILE hRawDrive = NIL_RTFILE;
4533 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4534 if (RT_SUCCESS(rc))
4535 {
4536 /*
4537 * Create the raw-drive descriptor
4538 */
4539 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4540 if (pRawDesc)
4541 {
4542 pRawDesc->szSignature[0] = 'R';
4543 pRawDesc->szSignature[1] = 'A';
4544 pRawDesc->szSignature[2] = 'W';
4545 //pRawDesc->szSignature[3] = '\0';
4546 if (!fPartitions)
4547 {
4548 /*
4549 * It's simple for when doing the whole drive.
4550 */
4551 pRawDesc->uFlags = VDISKRAW_DISK;
4552 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4553 }
4554 else
4555 {
4556 /*
4557 * In selected partitions mode we've got a lot more work ahead of us.
4558 */
4559 pRawDesc->uFlags = VDISKRAW_NORMAL;
4560 //pRawDesc->pszRawDisk = NULL;
4561 //pRawDesc->cPartDescs = 0;
4562 //pRawDesc->pPartDescs = NULL;
4563 /* We need to parse the partition map to complete the descriptor: */
4564 RTDVM hVolMgr = NIL_RTDVM;
4565 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4566 if (RT_SUCCESS(rc))
4567 {
4568 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4569 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4570 || enmFormatType == RTDVMFORMATTYPE_GPT)
4571 {
4572 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4573 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4574 /* Add copies of the partition tables: */
4575 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4576 pvBootSector, cbBootSector);
4577 if (RT_SUCCESS(rc))
4578 {
4579 /* Add descriptors for the partitions/volumes, indicating which
4580 should be accessible and how to access them: */
4581 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4582 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4583 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4584 RTDvmVolumeRelease(hVolRelease);
4585 /* Finally, sort the partitions and check consistency (overlaps, etc.): */
4586 if (RT_SUCCESS(rc))
4587 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4588 }
4589 }
4590 else
4591 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4592 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %d"),
4593 pImage->pszFilename, pszRawDrive, (int)RTDvmMapGetFormatType(hVolMgr));
4594 RTDvmRelease(hVolMgr);
4595 }
4596 }
4597 if (RT_SUCCESS(rc))
4598 {
4599 /*
4600 * We succeeded.
4601 */
4602 *ppRaw = pRawDesc;
4603 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4604 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4605 if (pRawDesc->cPartDescs)
4606 {
4607 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4608 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4609 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4610 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4611 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4612 }
4613 }
4614 else
4615 vmdkRawDescFree(pRawDesc);
4616 }
4617 else
4618 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4619 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
4620 pImage->pszFilename, sizeof(*pRawDesc));
4621 RTFileClose(hRawDrive);
4622 }
4623 }
4624 RTStrFree(pszRawDrive);
4625 RTMemFree(pvBootSector);
4626 return rc;
4627}
4628/**
4629 * Internal: create VMDK images for raw disk/partition access.
4630 */
4631static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4632 uint64_t cbSize)
4633{
4634 int rc = VINF_SUCCESS;
4635 PVMDKEXTENT pExtent;
4636 if (pRaw->uFlags & VDISKRAW_DISK)
4637 {
4638 /* Full raw disk access. This requires setting up a descriptor
4639 * file and opening the (flat) raw disk. */
4640 rc = vmdkCreateExtents(pImage, 1);
4641 if (RT_FAILURE(rc))
4642 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4643 pExtent = &pImage->pExtents[0];
4644 /* Create raw disk descriptor file. */
4645 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4646 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4647 true /* fCreate */));
4648 if (RT_FAILURE(rc))
4649 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4650 /* Set up basename for extent description. Cannot use StrDup. */
4651 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4652 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4653 if (!pszBasename)
4654 return VERR_NO_MEMORY;
4655 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4656 pExtent->pszBasename = pszBasename;
4657 /* For raw disks the full name is identical to the base name. */
4658 pExtent->pszFullname = RTStrDup(pszBasename);
4659 if (!pExtent->pszFullname)
4660 return VERR_NO_MEMORY;
4661 pExtent->enmType = VMDKETYPE_FLAT;
4662 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4663 pExtent->uSectorOffset = 0;
4664 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4665 pExtent->fMetaDirty = false;
4666 /* Open flat image, the raw disk. */
4667 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4668 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4669 false /* fCreate */));
4670 if (RT_FAILURE(rc))
4671 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4672 }
4673 else
4674 {
4675 /* Raw partition access. This requires setting up a descriptor
4676 * file, writing the partition information to a flat extent and
4677 * opening all the (flat) raw disk partitions. */
4678 /* First pass over the partition data areas to determine how many
4679 * extents we need. One data area can require up to 2 extents, as
4680 * it might be necessary to skip over unpartitioned space. */
4681 unsigned cExtents = 0;
4682 uint64_t uStart = 0;
4683 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4684 {
4685 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4686 if (uStart > pPart->offStartInVDisk)
4687 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4688 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4689 if (uStart < pPart->offStartInVDisk)
4690 cExtents++;
4691 uStart = pPart->offStartInVDisk + pPart->cbData;
4692 cExtents++;
4693 }
4694 /* Another extent for filling up the rest of the image. */
4695 if (uStart != cbSize)
4696 cExtents++;
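        /* Illustrative (hypothetical) layout: if only the second partition of
         * an MBR disk was selected, the passes below typically produce a FLAT
         * extent for the copied partition table data, a ZERO extent covering
         * the unselected first partition, a FLAT extent mapping the selected
         * partition onto its raw device, and a trailing ZERO extent padding
         * the image out to cbSize. */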
4697 rc = vmdkCreateExtents(pImage, cExtents);
4698 if (RT_FAILURE(rc))
4699 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4700 /* Create raw partition descriptor file. */
4701 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4702 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4703 true /* fCreate */));
4704 if (RT_FAILURE(rc))
4705 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4706 /* Create base filename for the partition table extent. */
4707 /** @todo remove fixed buffer without creating memory leaks. */
4708 char pszPartition[1024];
4709 const char *pszBase = RTPathFilename(pImage->pszFilename);
4710 const char *pszSuff = RTPathSuffix(pszBase);
4711 if (pszSuff == NULL)
4712 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4713 char *pszBaseBase = RTStrDup(pszBase);
4714 if (!pszBaseBase)
4715 return VERR_NO_MEMORY;
4716 RTPathStripSuffix(pszBaseBase);
4717 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4718 pszBaseBase, pszSuff);
4719 RTStrFree(pszBaseBase);
4720 /* Second pass over the partitions, now define all extents. */
4721 uint64_t uPartOffset = 0;
4722 cExtents = 0;
4723 uStart = 0;
4724 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4725 {
4726 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4727 pExtent = &pImage->pExtents[cExtents++];
4728 if (uStart < pPart->offStartInVDisk)
4729 {
4730 pExtent->pszBasename = NULL;
4731 pExtent->pszFullname = NULL;
4732 pExtent->enmType = VMDKETYPE_ZERO;
4733 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4734 pExtent->uSectorOffset = 0;
4735 pExtent->enmAccess = VMDKACCESS_READWRITE;
4736 pExtent->fMetaDirty = false;
4737 /* go to next extent */
4738 pExtent = &pImage->pExtents[cExtents++];
4739 }
4740 uStart = pPart->offStartInVDisk + pPart->cbData;
4741 if (pPart->pvPartitionData)
4742 {
4743 /* Set up basename for extent description. Can't use StrDup. */
4744 size_t cbBasename = strlen(pszPartition) + 1;
4745 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4746 if (!pszBasename)
4747 return VERR_NO_MEMORY;
4748 memcpy(pszBasename, pszPartition, cbBasename);
4749 pExtent->pszBasename = pszBasename;
4750 /* Set up full name for partition extent. */
4751 char *pszDirname = RTStrDup(pImage->pszFilename);
4752 if (!pszDirname)
4753 return VERR_NO_STR_MEMORY;
4754 RTPathStripFilename(pszDirname);
4755 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4756 RTStrFree(pszDirname);
4757 if (!pszFullname)
4758 return VERR_NO_STR_MEMORY;
4759 pExtent->pszFullname = pszFullname;
4760 pExtent->enmType = VMDKETYPE_FLAT;
4761 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4762 pExtent->uSectorOffset = uPartOffset;
4763 pExtent->enmAccess = VMDKACCESS_READWRITE;
4764 pExtent->fMetaDirty = false;
4765 /* Create partition table flat image. */
4766 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4767 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4768 true /* fCreate */));
4769 if (RT_FAILURE(rc))
4770 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4771 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4772 VMDK_SECTOR2BYTE(uPartOffset),
4773 pPart->pvPartitionData,
4774 pPart->cbData);
4775 if (RT_FAILURE(rc))
4776 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4777 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4778 }
4779 else
4780 {
4781 if (pPart->pszRawDevice)
4782 {
4783 /* Set up basename for extent descr. Can't use StrDup. */
4784 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4785 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4786 if (!pszBasename)
4787 return VERR_NO_MEMORY;
4788 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4789 pExtent->pszBasename = pszBasename;
4790 /* For raw disks full name is identical to base name. */
4791 pExtent->pszFullname = RTStrDup(pszBasename);
4792 if (!pExtent->pszFullname)
4793 return VERR_NO_MEMORY;
4794 pExtent->enmType = VMDKETYPE_FLAT;
4795 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4796 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4797 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4798 pExtent->fMetaDirty = false;
4799 /* Open flat image, the raw partition. */
4800 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4801 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4802 false /* fCreate */));
4803 if (RT_FAILURE(rc))
4804 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4805 }
4806 else
4807 {
4808 pExtent->pszBasename = NULL;
4809 pExtent->pszFullname = NULL;
4810 pExtent->enmType = VMDKETYPE_ZERO;
4811 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4812 pExtent->uSectorOffset = 0;
4813 pExtent->enmAccess = VMDKACCESS_READWRITE;
4814 pExtent->fMetaDirty = false;
4815 }
4816 }
4817 }
4818 /* Another extent for filling up the rest of the image. */
4819 if (uStart != cbSize)
4820 {
4821 pExtent = &pImage->pExtents[cExtents++];
4822 pExtent->pszBasename = NULL;
4823 pExtent->pszFullname = NULL;
4824 pExtent->enmType = VMDKETYPE_ZERO;
4825 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4826 pExtent->uSectorOffset = 0;
4827 pExtent->enmAccess = VMDKACCESS_READWRITE;
4828 pExtent->fMetaDirty = false;
4829 }
4830 }
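    /* The extents set up above eventually show up as descriptor lines roughly
     * like the following for the partitioned case (illustrative values only):
     *   RW 2048 FLAT "disk-pt.vmdk" 0
     *   RW 204800 ZERO
     *   RW 4096000 FLAT "/dev/sda2" 0
     *   RW 1024 ZERO
     * together with the createType value written just below. */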
4831 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4832 (pRaw->uFlags & VDISKRAW_DISK) ?
4833 "fullDevice" : "partitionedDevice");
4834 if (RT_FAILURE(rc))
4835 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4836 return rc;
4837}
4838/**
4839 * Internal: create a regular (i.e. file-backed) VMDK image.
4840 */
4841static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4842 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4843 unsigned uPercentStart, unsigned uPercentSpan)
4844{
4845 int rc = VINF_SUCCESS;
4846 unsigned cExtents = 1;
4847 uint64_t cbOffset = 0;
4848 uint64_t cbRemaining = cbSize;
4849 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4850 {
4851 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4852 /* Do proper extent computation: need one smaller extent if the total
4853 * size isn't evenly divisible by the split size. */
4854 if (cbSize % VMDK_2G_SPLIT_SIZE)
4855 cExtents++;
4856 }
4857 rc = vmdkCreateExtents(pImage, cExtents);
4858 if (RT_FAILURE(rc))
4859 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4860 /* Basename strings needed for constructing the extent names. */
4861 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4862 AssertPtr(pszBasenameSubstr);
4863 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4864 /* Create separate descriptor file if necessary. */
4865 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4866 {
4867 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4868 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4869 true /* fCreate */));
4870 if (RT_FAILURE(rc))
4871 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4872 }
4873 else
4874 pImage->pFile = NULL;
4875 /* Set up all extents. */
4876 for (unsigned i = 0; i < cExtents; i++)
4877 {
4878 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4879 uint64_t cbExtent = cbRemaining;
4880 /* Set up fullname/basename for extent description. Cannot use RTStrDup
4881 * for the basename, because the cleanup code frees it with RTMemTmpFree
4882 * and memory returned by RTStrDup is not guaranteed to be freeable that
4883 * way; it has to come from RTMemTmpAlloc. */
4884 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4885 {
4886 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4887 if (!pszBasename)
4888 return VERR_NO_MEMORY;
4889 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4890 pExtent->pszBasename = pszBasename;
4891 }
4892 else
4893 {
4894 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
4895 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
4896 RTPathStripSuffix(pszBasenameBase);
4897 char *pszTmp;
4898 size_t cbTmp;
4899 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4900 {
4901 if (cExtents == 1)
4902 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
4903 pszBasenameSuff);
4904 else
4905 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
4906 i+1, pszBasenameSuff);
4907 }
4908 else
4909 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
4910 pszBasenameSuff);
4911 RTStrFree(pszBasenameBase);
4912 if (!pszTmp)
4913 return VERR_NO_STR_MEMORY;
4914 cbTmp = strlen(pszTmp) + 1;
4915 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
4916 if (!pszBasename)
4917 {
4918 RTStrFree(pszTmp);
4919 return VERR_NO_MEMORY;
4920 }
4921 memcpy(pszBasename, pszTmp, cbTmp);
4922 RTStrFree(pszTmp);
4923 pExtent->pszBasename = pszBasename;
4924 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4925 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
4926 }
4927 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4928 if (!pszBasedirectory)
4929 return VERR_NO_STR_MEMORY;
4930 RTPathStripFilename(pszBasedirectory);
4931 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4932 RTStrFree(pszBasedirectory);
4933 if (!pszFullname)
4934 return VERR_NO_STR_MEMORY;
4935 pExtent->pszFullname = pszFullname;
4936 /* Create file for extent. */
4937 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4938 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4939 true /* fCreate */));
4940 if (RT_FAILURE(rc))
4941 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4942 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4943 {
4944 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
4945 0 /* fFlags */, pIfProgress,
4946 uPercentStart + cbOffset * uPercentSpan / cbSize,
4947 cbExtent * uPercentSpan / cbSize);
4948 if (RT_FAILURE(rc))
4949 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
4950 }
4951 /* Place descriptor file information (where integrated). */
4952 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4953 {
4954 pExtent->uDescriptorSector = 1;
4955 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4956 /* The descriptor is part of the (only) extent. */
4957 pExtent->pDescData = pImage->pDescData;
4958 pImage->pDescData = NULL;
4959 }
4960 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4961 {
4962 uint64_t cSectorsPerGDE, cSectorsPerGD;
4963 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4964 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
4965 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4966 pExtent->cGTEntries = 512;
4967 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4968 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4969 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4970 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
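            /* Worked example from the constants above: 64K grains are 128
             * sectors each and a grain table has 512 entries, so one GD entry
             * spans 512 * 128 = 65536 sectors (32 MiB); a 2 GB split extent
             * therefore needs 64 grain directory entries. */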
4971 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4972 {
4973 /* The spec says version is 1 for all VMDKs, but the vast
4974 * majority of streamOptimized VMDKs actually contain
4975 * version 3 - so go with the majority. Both are accepted. */
4976 pExtent->uVersion = 3;
4977 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4978 }
4979 }
4980 else
4981 {
4982 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4983 pExtent->enmType = VMDKETYPE_VMFS;
4984 else
4985 pExtent->enmType = VMDKETYPE_FLAT;
4986 }
4987 pExtent->enmAccess = VMDKACCESS_READWRITE;
4988 pExtent->fUncleanShutdown = true;
4989 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4990 pExtent->uSectorOffset = 0;
4991 pExtent->fMetaDirty = true;
4992 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4993 {
4994 /* fPreAlloc should never be false because VMware can't use such images. */
4995 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4996 RT_MAX( pExtent->uDescriptorSector
4997 + pExtent->cDescriptorSectors,
4998 1),
4999 true /* fPreAlloc */);
5000 if (RT_FAILURE(rc))
5001 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5002 }
5003 cbOffset += cbExtent;
5004 if (RT_SUCCESS(rc))
5005 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5006 cbRemaining -= cbExtent;
5007 }
5008 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5009 {
5010 /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
5011 * controller type is set in an image. */
5012 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5013 if (RT_FAILURE(rc))
5014 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5015 }
5016 const char *pszDescType = NULL;
5017 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5018 {
5019 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5020 pszDescType = "vmfs";
5021 else
5022 pszDescType = (cExtents == 1)
5023 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5024 }
5025 else
5026 {
5027 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5028 pszDescType = "streamOptimized";
5029 else
5030 {
5031 pszDescType = (cExtents == 1)
5032 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5033 }
5034 }
5035 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5036 pszDescType);
5037 if (RT_FAILURE(rc))
5038 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5039 return rc;
5040}
5041/**
5042 * Internal: Create a real stream optimized VMDK using only linear writes.
5043 */
5044static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5045{
5046 int rc = vmdkCreateExtents(pImage, 1);
5047 if (RT_FAILURE(rc))
5048 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5049 /* Basename strings needed for constructing the extent names. */
5050 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5051 AssertPtr(pszBasenameSubstr);
5052 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5053 /* No separate descriptor file. */
5054 pImage->pFile = NULL;
5055 /* Set up all extents. */
5056 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5057 /* Set up fullname/basename for extent description. Cannot use RTStrDup
5058 * for the basename, because the cleanup code frees it with RTMemTmpFree
5059 * and memory returned by RTStrDup is not guaranteed to be freeable that
5060 * way; it has to come from RTMemTmpAlloc. */
5061 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5062 if (!pszBasename)
5063 return VERR_NO_MEMORY;
5064 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5065 pExtent->pszBasename = pszBasename;
5066 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5067 RTPathStripFilename(pszBasedirectory);
5068 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5069 RTStrFree(pszBasedirectory);
5070 if (!pszFullname)
5071 return VERR_NO_STR_MEMORY;
5072 pExtent->pszFullname = pszFullname;
5073 /* Create file for extent. Make it write only, no reading allowed. */
5074 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5075 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5076 true /* fCreate */)
5077 & ~RTFILE_O_READ);
5078 if (RT_FAILURE(rc))
5079 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5080 /* Place descriptor file information. */
5081 pExtent->uDescriptorSector = 1;
5082 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5083 /* The descriptor is part of the (only) extent. */
5084 pExtent->pDescData = pImage->pDescData;
5085 pImage->pDescData = NULL;
5086 uint64_t cSectorsPerGDE, cSectorsPerGD;
5087 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5088 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5089 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5090 pExtent->cGTEntries = 512;
5091 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5092 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5093 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5094 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5095 /* The spec says version is 1 for all VMDKs, but the vast
5096 * majority of streamOptimized VMDKs actually contain
5097 * version 3 - so go with the majority. Both are accepted. */
5098 pExtent->uVersion = 3;
5099 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5100 pExtent->fFooter = true;
5101 pExtent->enmAccess = VMDKACCESS_READONLY;
5102 pExtent->fUncleanShutdown = false;
5103 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5104 pExtent->uSectorOffset = 0;
5105 pExtent->fMetaDirty = true;
5106 /* Create grain directory, without preallocating it straight away. It will
5107 * be constructed on the fly when writing out the data and written when
5108 * closing the image. The end effect is that the full grain directory is
5109 * allocated, which is a requirement of the VMDK specs. */
5110 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5111 false /* fPreAlloc */);
5112 if (RT_FAILURE(rc))
5113 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5114 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5115 "streamOptimized");
5116 if (RT_FAILURE(rc))
5117 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5118 return rc;
5119}
5120/**
5121 * Initializes the UUID fields in the DDB.
5122 *
5123 * @returns VBox status code.
5124 * @param pImage The VMDK image instance.
5125 */
5126static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5127{
5128 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5129 if (RT_SUCCESS(rc))
5130 {
5131 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5132 if (RT_SUCCESS(rc))
5133 {
5134 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5135 &pImage->ModificationUuid);
5136 if (RT_SUCCESS(rc))
5137 {
5138 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5139 &pImage->ParentModificationUuid);
5140 if (RT_FAILURE(rc))
5141 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5142 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5143 }
5144 else
5145 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5146 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5147 }
5148 else
5149 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5150 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5151 }
5152 else
5153 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5154 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5155 return rc;
5156}
5157/**
5158 * Internal: The actual code for creating any VMDK variant currently in
5159 * existence on hosted environments.
5160 */
5161static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5162 unsigned uImageFlags, const char *pszComment,
5163 PCVDGEOMETRY pPCHSGeometry,
5164 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5165 PVDINTERFACEPROGRESS pIfProgress,
5166 unsigned uPercentStart, unsigned uPercentSpan)
5167{
5168 pImage->uImageFlags = uImageFlags;
5169 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5170 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5171 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5172 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5173 &pImage->Descriptor);
5174 if (RT_SUCCESS(rc))
5175 {
5176 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5177 {
5178 /* Raw disk image (includes raw partition). */
5179 PVDISKRAW pRaw = NULL;
5180 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5181 if (RT_FAILURE(rc))
5182 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not get raw descriptor for '%s'"), pImage->pszFilename);
5183 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5184 vmdkRawDescFree(pRaw);
5185 }
5186 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5187 {
5188 /* Stream optimized sparse image (monolithic). */
5189 rc = vmdkCreateStreamImage(pImage, cbSize);
5190 }
5191 else
5192 {
5193 /* Regular fixed or sparse image (monolithic or split). */
5194 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5195 pIfProgress, uPercentStart,
5196 uPercentSpan * 95 / 100);
5197 }
5198 if (RT_SUCCESS(rc))
5199 {
5200 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5201 pImage->cbSize = cbSize;
5202 for (unsigned i = 0; i < pImage->cExtents; i++)
5203 {
5204 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5205 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5206 pExtent->cNominalSectors, pExtent->enmType,
5207 pExtent->pszBasename, pExtent->uSectorOffset);
5208 if (RT_FAILURE(rc))
5209 {
5210 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5211 break;
5212 }
5213 }
5214 if (RT_SUCCESS(rc))
5215 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5216 if ( RT_SUCCESS(rc)
5217 && pPCHSGeometry->cCylinders != 0
5218 && pPCHSGeometry->cHeads != 0
5219 && pPCHSGeometry->cSectors != 0)
5220 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5221 if ( RT_SUCCESS(rc)
5222 && pLCHSGeometry->cCylinders != 0
5223 && pLCHSGeometry->cHeads != 0
5224 && pLCHSGeometry->cSectors != 0)
5225 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5226 pImage->LCHSGeometry = *pLCHSGeometry;
5227 pImage->PCHSGeometry = *pPCHSGeometry;
5228 pImage->ImageUuid = *pUuid;
5229 RTUuidClear(&pImage->ParentUuid);
5230 RTUuidClear(&pImage->ModificationUuid);
5231 RTUuidClear(&pImage->ParentModificationUuid);
5232 if (RT_SUCCESS(rc))
5233 rc = vmdkCreateImageDdbUuidsInit(pImage);
5234 if (RT_SUCCESS(rc))
5235 rc = vmdkAllocateGrainTableCache(pImage);
5236 if (RT_SUCCESS(rc))
5237 {
5238 rc = vmdkSetImageComment(pImage, pszComment);
5239 if (RT_FAILURE(rc))
5240 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5241 }
5242 if (RT_SUCCESS(rc))
5243 {
5244 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5245 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5246 {
5247 /* streamOptimized is a bit special: we cannot trigger the flush
5248 * until all data has been written, so we write the necessary
5249 * information explicitly. */
5250 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5251 - pImage->Descriptor.aLines[0], 512));
5252 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5253 if (RT_SUCCESS(rc))
5254 {
5255 rc = vmdkWriteDescriptor(pImage, NULL);
5256 if (RT_FAILURE(rc))
5257 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5258 }
5259 else
5260 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5261 }
5262 else
5263 rc = vmdkFlushImage(pImage, NULL);
5264 }
5265 }
5266 }
5267 else
5268 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5269 if (RT_SUCCESS(rc))
5270 {
5271 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5272 pImage->RegionList.fFlags = 0;
5273 pImage->RegionList.cRegions = 1;
5274 pRegion->offRegion = 0; /* Disk start. */
5275 pRegion->cbBlock = 512;
5276 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5277 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5278 pRegion->cbData = 512;
5279 pRegion->cbMetadata = 0;
5280 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5281 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5282 }
5283 else
5284 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5285 return rc;
5286}
5287/**
5288 * Internal: Update image comment.
5289 */
5290static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5291{
5292 char *pszCommentEncoded = NULL;
5293 if (pszComment)
5294 {
5295 pszCommentEncoded = vmdkEncodeString(pszComment);
5296 if (!pszCommentEncoded)
5297 return VERR_NO_MEMORY;
5298 }
5299 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5300 "ddb.comment", pszCommentEncoded);
5301 if (pszCommentEncoded)
5302 RTStrFree(pszCommentEncoded);
5303 if (RT_FAILURE(rc))
5304 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5305 return VINF_SUCCESS;
5306}
5307/**
5308 * Internal. Clear the grain table buffer for real stream optimized writing.
5309 */
5310static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5311{
5312 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5313 for (uint32_t i = 0; i < cCacheLines; i++)
5314 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5315 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5316}
5317/**
5318 * Internal. Flush the grain table buffer for real stream optimized writing.
5319 */
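/* Rough sketch of what one flush appends to the stream (derived from the code
 * below, assuming 512-byte sectors): a marker sector whose uSector field holds
 * the grain table size in sectors and whose type is VMDK_MARKER_GT, followed
 * immediately by the grain table entries in little endian
 * (cGTEntries * sizeof(uint32_t) bytes), after which the append position is
 * re-aligned to the next sector boundary. */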
5320static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5321 uint32_t uGDEntry)
5322{
5323 int rc = VINF_SUCCESS;
5324 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5325 /* VMware does not write out completely empty grain tables in the case
5326 * of streamOptimized images, which according to my interpretation of
5327 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5328 * handle it without problems do it the same way and save some bytes. */
5329 bool fAllZero = true;
5330 for (uint32_t i = 0; i < cCacheLines; i++)
5331 {
5332 /* Check whether this cache line of the grain table contains any
5333 * non-zero entries; completely empty grain tables are not written. */
5334 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5335 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5336 if (*pGTTmp)
5337 {
5338 fAllZero = false;
5339 break;
5340 }
5341 if (!fAllZero)
5342 break;
5343 }
5344 if (fAllZero)
5345 return VINF_SUCCESS;
5346 uint64_t uFileOffset = pExtent->uAppendPosition;
5347 if (!uFileOffset)
5348 return VERR_INTERNAL_ERROR;
5349 /* Align to sector, as the previous write could have been any size. */
5350 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5351 /* Grain table marker. */
5352 uint8_t aMarker[512];
5353 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5354 memset(pMarker, '\0', sizeof(aMarker));
5355 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5356 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5357 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5358 aMarker, sizeof(aMarker));
5359 AssertRC(rc);
5360 uFileOffset += 512;
5361 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5362 return VERR_INTERNAL_ERROR;
5363 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5364 for (uint32_t i = 0; i < cCacheLines; i++)
5365 {
5366 /* Convert the grain table to little endian in place, as it will not
5367 * be used at all after this function has been called. */
5368 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5369 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5370 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5371 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5372 &pImage->pGTCache->aGTCache[i].aGTData[0],
5373 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5374 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5375 if (RT_FAILURE(rc))
5376 break;
5377 }
5378 Assert(!(uFileOffset % 512));
5379 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5380 return rc;
5381}
5382/**
5383 * Internal. Free all allocated space for representing an image, and optionally
5384 * delete the image from disk.
5385 */
5386static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5387{
5388 int rc = VINF_SUCCESS;
5389 /* Freeing a never allocated image (e.g. because the open failed) is
5390 * not signalled as an error. After all nothing bad happens. */
5391 if (pImage)
5392 {
5393 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5394 {
5395 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5396 {
5397 /* Check if all extents are clean. */
5398 for (unsigned i = 0; i < pImage->cExtents; i++)
5399 {
5400 Assert(!pImage->pExtents[i].fUncleanShutdown);
5401 }
5402 }
5403 else
5404 {
5405 /* Mark all extents as clean. */
5406 for (unsigned i = 0; i < pImage->cExtents; i++)
5407 {
5408 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
5409 && pImage->pExtents[i].fUncleanShutdown)
5410 {
5411 pImage->pExtents[i].fUncleanShutdown = false;
5412 pImage->pExtents[i].fMetaDirty = true;
5413 }
5414 /* From now on it's not safe to append any more data. */
5415 pImage->pExtents[i].uAppendPosition = 0;
5416 }
5417 }
5418 }
5419 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5420 {
5421 /* No need to write any pending data if the file will be deleted
5422 * or if the new file wasn't successfully created. */
5423 if ( !fDelete && pImage->pExtents
5424 && pImage->pExtents[0].cGTEntries
5425 && pImage->pExtents[0].uAppendPosition)
5426 {
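                /* Closing sequence appended below, in stream order: the last
                 * partially used grain table (completely empty tables are
                 * skipped by vmdkStreamFlushGT), a GD marker followed by the
                 * grain directory, a footer marker followed by the footer
                 * copy of the sparse header, and an end-of-stream marker. */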
5427 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5428 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5429 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5430 AssertRC(rc);
5431 vmdkStreamClearGT(pImage, pExtent);
5432 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
5433 {
5434 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5435 AssertRC(rc);
5436 }
5437 uint64_t uFileOffset = pExtent->uAppendPosition;
5438 if (!uFileOffset)
5439 return VERR_INTERNAL_ERROR;
5440 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5441 /* From now on it's not safe to append any more data. */
5442 pExtent->uAppendPosition = 0;
5443 /* Grain directory marker. */
5444 uint8_t aMarker[512];
5445 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5446 memset(pMarker, '\0', sizeof(aMarker));
5447 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(RT_ALIGN_64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t), 512)));
5448 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
5449 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5450 aMarker, sizeof(aMarker));
5451 AssertRC(rc);
5452 uFileOffset += 512;
5453 /* Write grain directory in little endian style. The array will
5454 * not be used after this, so convert in place. */
5455 uint32_t *pGDTmp = pExtent->pGD;
5456 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
5457 *pGDTmp = RT_H2LE_U32(*pGDTmp);
5458 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5459 uFileOffset, pExtent->pGD,
5460 pExtent->cGDEntries * sizeof(uint32_t));
5461 AssertRC(rc);
5462 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
5463 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
5464 uFileOffset = RT_ALIGN_64( uFileOffset
5465 + pExtent->cGDEntries * sizeof(uint32_t),
5466 512);
5467 /* Footer marker. */
5468 memset(pMarker, '\0', sizeof(aMarker));
5469 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(512));
5470 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
5471 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5472 uFileOffset, aMarker, sizeof(aMarker));
5473 AssertRC(rc);
5474 uFileOffset += 512;
5475 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
5476 AssertRC(rc);
5477 uFileOffset += 512;
5478 /* End-of-stream marker. */
5479 memset(pMarker, '\0', sizeof(aMarker));
5480 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5481 uFileOffset, aMarker, sizeof(aMarker));
5482 AssertRC(rc);
5483 }
5484 }
5485 else if (!fDelete && fFlush)
5486 vmdkFlushImage(pImage, NULL);
5487 if (pImage->pExtents != NULL)
5488 {
5489 for (unsigned i = 0 ; i < pImage->cExtents; i++)
5490 {
5491 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
5492 if (RT_SUCCESS(rc))
5493 rc = rc2; /* Propagate any error when closing the file. */
5494 }
5495 RTMemFree(pImage->pExtents);
5496 pImage->pExtents = NULL;
5497 }
5498 pImage->cExtents = 0;
5499 if (pImage->pFile != NULL)
5500 {
5501 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
5502 if (RT_SUCCESS(rc))
5503 rc = rc2; /* Propagate any error when closing the file. */
5504 }
5505 int rc2 = vmdkFileCheckAllClose(pImage);
5506 if (RT_SUCCESS(rc))
5507 rc = rc2; /* Propagate any error when closing the file. */
5508 if (pImage->pGTCache)
5509 {
5510 RTMemFree(pImage->pGTCache);
5511 pImage->pGTCache = NULL;
5512 }
5513 if (pImage->pDescData)
5514 {
5515 RTMemFree(pImage->pDescData);
5516 pImage->pDescData = NULL;
5517 }
5518 }
5519 LogFlowFunc(("returns %Rrc\n", rc));
5520 return rc;
5521}
5522/**
5523 * Internal. Flush image data (and metadata) to disk.
5524 */
5525static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5526{
5527 PVMDKEXTENT pExtent;
5528 int rc = VINF_SUCCESS;
5529 /* Update descriptor if changed. */
5530 if (pImage->Descriptor.fDirty)
5531 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5532 if (RT_SUCCESS(rc))
5533 {
5534 for (unsigned i = 0; i < pImage->cExtents; i++)
5535 {
5536 pExtent = &pImage->pExtents[i];
5537 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5538 {
5539 switch (pExtent->enmType)
5540 {
5541 case VMDKETYPE_HOSTED_SPARSE:
5542 if (!pExtent->fFooter)
5543 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5544 else
5545 {
5546 uint64_t uFileOffset = pExtent->uAppendPosition;
5547 /* Simply skip writing anything if the streamOptimized
5548 * image wasn't just created. */
5549 if (!uFileOffset)
5550 break;
5551 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5552 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5553 uFileOffset, pIoCtx);
5554 }
5555 break;
5556 case VMDKETYPE_VMFS:
5557 case VMDKETYPE_FLAT:
5558 /* Nothing to do. */
5559 break;
5560 case VMDKETYPE_ZERO:
5561 default:
5562 AssertMsgFailed(("extent with type %d marked as dirty\n",
5563 pExtent->enmType));
5564 break;
5565 }
5566 }
5567 if (RT_FAILURE(rc))
5568 break;
5569 switch (pExtent->enmType)
5570 {
5571 case VMDKETYPE_HOSTED_SPARSE:
5572 case VMDKETYPE_VMFS:
5573 case VMDKETYPE_FLAT:
5574 /** @todo implement proper path absolute check. */
5575 if ( pExtent->pFile != NULL
5576 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5577 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5578 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5579 NULL, NULL);
5580 break;
5581 case VMDKETYPE_ZERO:
5582 /* No need to do anything for this extent. */
5583 break;
5584 default:
5585 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5586 break;
5587 }
5588 }
5589 }
5590 return rc;
5591}
5592/**
5593 * Internal. Find extent corresponding to the sector number in the disk.
5594 */
5595static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5596 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5597{
5598 PVMDKEXTENT pExtent = NULL;
5599 int rc = VINF_SUCCESS;
5600 for (unsigned i = 0; i < pImage->cExtents; i++)
5601 {
5602 if (offSector < pImage->pExtents[i].cNominalSectors)
5603 {
5604 pExtent = &pImage->pExtents[i];
5605 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5606 break;
5607 }
5608 offSector -= pImage->pExtents[i].cNominalSectors;
5609 }
5610 if (pExtent)
5611 *ppExtent = pExtent;
5612 else
5613 rc = VERR_IO_SECTOR_NOT_FOUND;
5614 return rc;
5615}
5616/**
5617 * Internal. Hash function for placing the grain table hash entries.
5618 */
5619static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5620 unsigned uExtent)
5621{
5622 /** @todo this hash function is quite simple, maybe use a better one which
5623 * scrambles the bits better. */
5624 return (uSector + uExtent) % pCache->cEntries;
5625}
5626/**
5627 * Internal. Get sector number in the extent file from the relative sector
5628 * number in the extent.
5629 */
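/* Worked example with the geometry used at creation time (128 sectors per
 * grain, 512 GT entries, hence 65536 sectors per GD entry): for uSector
 * 200000 the lookup uses GD index 200000 / 65536 = 3, grain table entry
 * (200000 / 128) % 512 = 26, and the remaining 200000 % 128 = 64 sectors are
 * added to the grain start sector found in that entry. */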
5630static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5631 PVMDKEXTENT pExtent, uint64_t uSector,
5632 uint64_t *puExtentSector)
5633{
5634 PVMDKGTCACHE pCache = pImage->pGTCache;
5635 uint64_t uGDIndex, uGTSector, uGTBlock;
5636 uint32_t uGTHash, uGTBlockIndex;
5637 PVMDKGTCACHEENTRY pGTCacheEntry;
5638 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5639 int rc;
5640 /* For newly created and readonly/sequentially opened streamOptimized
5641 * images this must be a no-op, as the grain directory is not there. */
5642 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5643 && pExtent->uAppendPosition)
5644 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5645 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
5646 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5647 {
5648 *puExtentSector = 0;
5649 return VINF_SUCCESS;
5650 }
5651 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5652 if (uGDIndex >= pExtent->cGDEntries)
5653 return VERR_OUT_OF_RANGE;
5654 uGTSector = pExtent->pGD[uGDIndex];
5655 if (!uGTSector)
5656 {
5657 /* There is no grain table referenced by this grain directory
5658 * entry. So there is absolutely no data in this area. */
5659 *puExtentSector = 0;
5660 return VINF_SUCCESS;
5661 }
5662 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5663 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5664 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5665 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5666 || pGTCacheEntry->uGTBlock != uGTBlock)
5667 {
5668 /* Cache miss, fetch data from disk. */
5669 PVDMETAXFER pMetaXfer;
5670 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5671 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5672 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5673 if (RT_FAILURE(rc))
5674 return rc;
5675 /* We can release the metadata transfer immediately. */
5676 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5677 pGTCacheEntry->uExtent = pExtent->uExtent;
5678 pGTCacheEntry->uGTBlock = uGTBlock;
5679 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5680 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5681 }
5682 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5683 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5684 if (uGrainSector)
5685 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5686 else
5687 *puExtentSector = 0;
5688 return VINF_SUCCESS;
5689}
5690/**
5691 * Internal. Writes the grain and also if necessary the grain tables.
5692 * Uses the grain table cache as a true grain table.
5693 */
5694static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5695 uint64_t uSector, PVDIOCTX pIoCtx,
5696 uint64_t cbWrite)
5697{
5698 uint32_t uGrain;
5699 uint32_t uGDEntry, uLastGDEntry;
5700 uint32_t cbGrain = 0;
5701 uint32_t uCacheLine, uCacheEntry;
5702 const void *pData;
5703 int rc;
5704 /* Very strict requirements: always write at least one full grain, with
5705 * proper alignment. Everything else would require reading of already
5706 * written data, which we don't support for obvious reasons. The only
5707 * exception is the last grain, and only if the image size specifies
5708 * that only some portion holds data. In any case the write must be
5709 * within the image limits, no "overshoot" allowed. */
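    /* Accepted sequence under these rules (hypothetical numbers, 64K grains):
     * writes at guest byte offsets 0, 64K, 128K, ... of exactly one grain
     * each, plus possibly one shorter final write when the nominal size is
     * not a grain multiple; unaligned or out-of-order writes are rejected
     * below with VERR_INVALID_PARAMETER or VERR_VD_VMDK_INVALID_WRITE. */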
5710 if ( cbWrite == 0
5711 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
5712 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
5713 || uSector % pExtent->cSectorsPerGrain
5714 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
5715 return VERR_INVALID_PARAMETER;
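 /* Illustration of the check above, with an assumed grain size of 128 sectors
  * (64 KiB): a 64 KiB write at uSector = 256 is accepted (grain aligned, full
  * grain), while a 4 KiB write at uSector = 256 is accepted only if fewer than
  * 128 sectors remain before cNominalSectors, i.e. it targets the final,
  * partially used grain; anything else that is short or misaligned is rejected. */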
5716 /* Clip write range to at most the rest of the grain. */
5717 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
5718 /* Do not allow going backwards. */
5719 uGrain = uSector / pExtent->cSectorsPerGrain;
5720 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5721 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
5722 uGDEntry = uGrain / pExtent->cGTEntries;
5723 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5724 if (uGrain < pExtent->uLastGrainAccess)
5725 return VERR_VD_VMDK_INVALID_WRITE;
5726 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
5727 * to allocate something, we also need to detect the situation ourselves. */
5728 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5729 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
5730 return VINF_SUCCESS;
5731 if (uGDEntry != uLastGDEntry)
5732 {
5733 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5734 if (RT_FAILURE(rc))
5735 return rc;
5736 vmdkStreamClearGT(pImage, pExtent);
5737 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5738 {
5739 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5740 if (RT_FAILURE(rc))
5741 return rc;
5742 }
5743 }
5744 uint64_t uFileOffset;
5745 uFileOffset = pExtent->uAppendPosition;
5746 if (!uFileOffset)
5747 return VERR_INTERNAL_ERROR;
5748 /* Align to sector, as the previous write could have been any size. */
5749 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5750 /* Paranoia check: extent type, grain table buffer presence and
5751 * grain table buffer space. Also grain table entry must be clear. */
5752 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5753 || !pImage->pGTCache
5754 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5755 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5756 return VERR_INTERNAL_ERROR;
5757 /* Update grain table entry. */
5758 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5759 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5760 {
5761 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
5762 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5763 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5764 pData = pExtent->pvGrain;
5765 }
5766 else
5767 {
5768 RTSGSEG Segment;
5769 unsigned cSegments = 1;
5770 size_t cbSeg = 0;
5771 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5772 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5773 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5774 pData = Segment.pvSeg;
5775 }
5776 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5777 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5778 uSector, &cbGrain);
5779 if (RT_FAILURE(rc))
5780 {
5781 pExtent->uGrainSectorAbs = 0;
5782 AssertRC(rc);
5783 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5784 }
5785 pExtent->uLastGrainAccess = uGrain;
5786 pExtent->uAppendPosition += cbGrain;
5787 return rc;
5788}
5789/**
5790 * Internal: Updates the grain table during grain allocation.
5791 */
5792static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5793 PVMDKGRAINALLOCASYNC pGrainAlloc)
5794{
5795 int rc = VINF_SUCCESS;
5796 PVMDKGTCACHE pCache = pImage->pGTCache;
5797 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5798 uint32_t uGTHash, uGTBlockIndex;
5799 uint64_t uGTSector, uRGTSector, uGTBlock;
5800 uint64_t uSector = pGrainAlloc->uSector;
5801 PVMDKGTCACHEENTRY pGTCacheEntry;
5802 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5803 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5804 uGTSector = pGrainAlloc->uGTSector;
5805 uRGTSector = pGrainAlloc->uRGTSector;
5806 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5807 /* Update the grain table (and the cache). */
5808 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5809 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5810 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5811 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5812 || pGTCacheEntry->uGTBlock != uGTBlock)
5813 {
5814 /* Cache miss, fetch data from disk. */
5815 LogFlow(("Cache miss, fetch data from disk\n"));
5816 PVDMETAXFER pMetaXfer = NULL;
5817 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5818 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5819 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5820 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
5821 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5822 {
5823 pGrainAlloc->cIoXfersPending++;
5824 pGrainAlloc->fGTUpdateNeeded = true;
5825 /* Leave early, we will be called again after the read completed. */
5826 LogFlowFunc(("Metadata read in progress, leaving\n"));
5827 return rc;
5828 }
5829 else if (RT_FAILURE(rc))
5830 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5831 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5832 pGTCacheEntry->uExtent = pExtent->uExtent;
5833 pGTCacheEntry->uGTBlock = uGTBlock;
5834 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5835 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5836 }
5837 else
5838 {
5839 /* Cache hit. Convert grain table block back to disk format, otherwise
5840 * the code below will write garbage for all but the updated entry. */
5841 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5842 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5843 }
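 /* For reference: on-disk grain table entries are little endian. For example the
  * value 0x00012345 is stored as the byte sequence 45 23 01 00, so
  * RT_H2LE_U32()/RT_LE2H_U32() swap bytes on big-endian hosts and are no-ops on
  * little-endian ones. That is why the cache-hit path above re-encodes the whole
  * cache line before the single-entry update below is written back to disk. */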
5844 pGrainAlloc->fGTUpdateNeeded = false;
5845 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5846 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5847 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5848 /* Update grain table on disk. */
5849 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5850 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5851 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5852 vmdkAllocGrainComplete, pGrainAlloc);
5853 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5854 pGrainAlloc->cIoXfersPending++;
5855 else if (RT_FAILURE(rc))
5856 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5857 if (pExtent->pRGD)
5858 {
5859 /* Update backup grain table on disk. */
5860 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5861 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5862 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5863 vmdkAllocGrainComplete, pGrainAlloc);
5864 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5865 pGrainAlloc->cIoXfersPending++;
5866 else if (RT_FAILURE(rc))
5867 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5868 }
5869 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5870 return rc;
5871}
5872/**
5873 * Internal - complete the grain allocation by updating disk grain table if required.
5874 */
5875static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5876{
5877 RT_NOREF1(rcReq);
5878 int rc = VINF_SUCCESS;
5879 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5880 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5881 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5882 pBackendData, pIoCtx, pvUser, rcReq));
5883 pGrainAlloc->cIoXfersPending--;
5884 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5885 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
5886 if (!pGrainAlloc->cIoXfersPending)
5887 {
5888 /* Grain allocation completed. */
5889 RTMemFree(pGrainAlloc);
5890 }
5891 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5892 return rc;
5893}
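/* Illustrative sequence of one asynchronous grain allocation, assuming the GT
 * read and both grain table writes complete asynchronously (the exact number of
 * transfers depends on the extent layout, e.g. whether a redundant GT exists):
 *   vmdkAllocGrainGTUpdate():  GT read queued          -> cIoXfersPending = 1, fGTUpdateNeeded = true
 *   vmdkAllocGrainComplete():  pending drops to 0      -> re-runs vmdkAllocGrainGTUpdate()
 *   vmdkAllocGrainGTUpdate():  GT + RGT writes queued  -> cIoXfersPending = 2
 *   vmdkAllocGrainComplete():  pending drops to 1
 *   vmdkAllocGrainComplete():  pending drops to 0      -> RTMemFree(pGrainAlloc)
 */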
5894/**
5895 * Internal. Allocates a new grain table (if necessary).
5896 */
5897static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5898 uint64_t uSector, uint64_t cbWrite)
5899{
5900 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
5901 uint64_t uGDIndex, uGTSector, uRGTSector;
5902 uint64_t uFileOffset;
5903 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5904 int rc;
5905 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5906 pCache, pExtent, pIoCtx, uSector, cbWrite));
5907 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5908 if (!pGrainAlloc)
5909 return VERR_NO_MEMORY;
5910 pGrainAlloc->pExtent = pExtent;
5911 pGrainAlloc->uSector = uSector;
5912 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5913 if (uGDIndex >= pExtent->cGDEntries)
5914 {
5915 RTMemFree(pGrainAlloc);
5916 return VERR_OUT_OF_RANGE;
5917 }
5918 uGTSector = pExtent->pGD[uGDIndex];
5919 if (pExtent->pRGD)
5920 uRGTSector = pExtent->pRGD[uGDIndex];
5921 else
5922 uRGTSector = 0; /**< avoid compiler warning */
5923 if (!uGTSector)
5924 {
5925 LogFlow(("Allocating new grain table\n"));
5926 /* There is no grain table referenced by this grain directory
5927 * entry. So there is absolutely no data in this area. Allocate
5928 * a new grain table and put the reference to it in the GDs. */
5929 uFileOffset = pExtent->uAppendPosition;
5930 if (!uFileOffset)
5931 {
5932 RTMemFree(pGrainAlloc);
5933 return VERR_INTERNAL_ERROR;
5934 }
5935 Assert(!(uFileOffset % 512));
5936 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5937 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5938 /* Normally the grain table is preallocated for hosted sparse extents
5939 * that support more than 32 bit sector numbers. So this shouldn't
5940 * ever happen on a valid extent. */
5941 if (uGTSector > UINT32_MAX)
5942 {
5943 RTMemFree(pGrainAlloc);
5944 return VERR_VD_VMDK_INVALID_HEADER;
5945 }
5946 /* Write grain table by writing the required number of grain table
5947 * cache chunks. Allocate memory dynamically here or we flood the
5948 * metadata cache with very small entries. */
5949 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
5950 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5951 if (!paGTDataTmp)
5952 {
5953 RTMemFree(pGrainAlloc);
5954 return VERR_NO_MEMORY;
5955 }
5956 memset(paGTDataTmp, '\0', cbGTDataTmp);
5957 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5958 VMDK_SECTOR2BYTE(uGTSector),
5959 paGTDataTmp, cbGTDataTmp, pIoCtx,
5960 vmdkAllocGrainComplete, pGrainAlloc);
5961 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5962 pGrainAlloc->cIoXfersPending++;
5963 else if (RT_FAILURE(rc))
5964 {
5965 RTMemTmpFree(paGTDataTmp);
5966 RTMemFree(pGrainAlloc);
5967 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5968 }
5969 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
5970 + cbGTDataTmp, 512);
5971 if (pExtent->pRGD)
5972 {
5973 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5974 uFileOffset = pExtent->uAppendPosition;
5975 if (!uFileOffset)
5976 return VERR_INTERNAL_ERROR;
5977 Assert(!(uFileOffset % 512));
5978 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5979 /* Normally the redundant grain table is preallocated for hosted
5980 * sparse extents that support more than 32 bit sector numbers. So
5981 * this shouldn't ever happen on a valid extent. */
5982 if (uRGTSector > UINT32_MAX)
5983 {
5984 RTMemTmpFree(paGTDataTmp);
5985 return VERR_VD_VMDK_INVALID_HEADER;
5986 }
5987 /* Write grain table by writing the required number of grain table
5988 * cache chunks. Allocate memory dynamically here or we flood the
5989 * metadata cache with very small entries. */
5990 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5991 VMDK_SECTOR2BYTE(uRGTSector),
5992 paGTDataTmp, cbGTDataTmp, pIoCtx,
5993 vmdkAllocGrainComplete, pGrainAlloc);
5994 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5995 pGrainAlloc->cIoXfersPending++;
5996 else if (RT_FAILURE(rc))
5997 {
5998 RTMemTmpFree(paGTDataTmp);
5999 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6000 }
6001 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6002 }
6003 RTMemTmpFree(paGTDataTmp);
6004 /* Update the grain directory on disk (done only after writing the
6005 * grain table, since doing it before would result in a garbled extent
6006 * if the operation is aborted for some reason; this way the worst that
6007 * can happen is some unused sectors in the extent). */
6008 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6009 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6010 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6011 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6012 vmdkAllocGrainComplete, pGrainAlloc);
6013 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6014 pGrainAlloc->cIoXfersPending++;
6015 else if (RT_FAILURE(rc))
6016 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6017 if (pExtent->pRGD)
6018 {
6019 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6020 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6021 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6022 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6023 vmdkAllocGrainComplete, pGrainAlloc);
6024 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6025 pGrainAlloc->cIoXfersPending++;
6026 else if (RT_FAILURE(rc))
6027 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6028 }
6029 /* As the final step update the in-memory copy of the GDs. */
6030 pExtent->pGD[uGDIndex] = uGTSector;
6031 if (pExtent->pRGD)
6032 pExtent->pRGD[uGDIndex] = uRGTSector;
6033 }
6034 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6035 pGrainAlloc->uGTSector = uGTSector;
6036 pGrainAlloc->uRGTSector = uRGTSector;
6037 uFileOffset = pExtent->uAppendPosition;
6038 if (!uFileOffset)
6039 return VERR_INTERNAL_ERROR;
6040 Assert(!(uFileOffset % 512));
6041 pGrainAlloc->uGrainOffset = uFileOffset;
6042 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6043 {
6044 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6045 ("Accesses to stream optimized images must be synchronous\n"),
6046 VERR_INVALID_STATE);
6047 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6048 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6049 /* Invalidate cache, just in case some code incorrectly allows mixing
6050 * of reads and writes. Normally shouldn't be needed. */
6051 pExtent->uGrainSectorAbs = 0;
6052 /* Write compressed data block and the markers. */
6053 uint32_t cbGrain = 0;
6054 size_t cbSeg = 0;
6055 RTSGSEG Segment;
6056 unsigned cSegments = 1;
6057 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6058 &cSegments, cbWrite);
6059 Assert(cbSeg == cbWrite);
6060 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6061 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6062 if (RT_FAILURE(rc))
6063 {
6064 AssertRC(rc);
6065 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6066 }
6067 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6068 pExtent->uAppendPosition += cbGrain;
6069 }
6070 else
6071 {
6072 /* Write the data. Always a full grain, or we're in big trouble. */
6073 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6074 uFileOffset, pIoCtx, cbWrite,
6075 vmdkAllocGrainComplete, pGrainAlloc);
6076 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6077 pGrainAlloc->cIoXfersPending++;
6078 else if (RT_FAILURE(rc))
6079 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6080 pExtent->uAppendPosition += cbWrite;
6081 }
6082 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6083 if (!pGrainAlloc->cIoXfersPending)
6084 {
6085 /* Grain allocation completed. */
6086 RTMemFree(pGrainAlloc);
6087 }
6088 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6089 return rc;
6090}
6091/**
6092 * Internal. Reads the contents by sequentially going over the compressed
6093 * grains (hoping that they are in sequence).
6094 */
6095static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6096 uint64_t uSector, PVDIOCTX pIoCtx,
6097 uint64_t cbRead)
6098{
6099 int rc;
6100 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6101 pImage, pExtent, uSector, pIoCtx, cbRead));
6102 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6103 ("Async I/O not supported for sequential stream optimized images\n"),
6104 VERR_INVALID_STATE);
6105 /* Do not allow going backwards. */
6106 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6107 if (uGrain < pExtent->uLastGrainAccess)
6108 return VERR_VD_VMDK_INVALID_STATE;
6109 pExtent->uLastGrainAccess = uGrain;
6110 /* After a previous error do not attempt to recover, as it would need
6111 * seeking (in the general case backwards, which is forbidden). */
6112 if (!pExtent->uGrainSectorAbs)
6113 return VERR_VD_VMDK_INVALID_STATE;
6114 /* Check if we need to read something from the image or if what we have
6115 * in the buffer is good to fulfill the request. */
6116 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6117 {
6118 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6119 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6120 /* Get the marker from the next data block - and skip everything which
6121 * is not a compressed grain. If it's a compressed grain which is for
6122 * the requested sector (or after), read it. */
6123 VMDKMARKER Marker;
6124 do
6125 {
6126 RT_ZERO(Marker);
6127 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6128 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6129 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6130 if (RT_FAILURE(rc))
6131 return rc;
6132 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6133 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6134 if (Marker.cbSize == 0)
6135 {
6136 /* A marker for something other than a compressed grain. */
6137 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6138 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6139 + RT_UOFFSETOF(VMDKMARKER, uType),
6140 &Marker.uType, sizeof(Marker.uType));
6141 if (RT_FAILURE(rc))
6142 return rc;
6143 Marker.uType = RT_LE2H_U32(Marker.uType);
6144 switch (Marker.uType)
6145 {
6146 case VMDK_MARKER_EOS:
6147 uGrainSectorAbs++;
6148 /* Read (or mostly skip) to the end of the file. Uses the
6149 * marker's LBA field as a scratch buffer since it is unused
6150 * anyway. This makes sure that really everything is read in
6151 * the success case. If this read fails it means the image is
6152 * truncated, but this is harmless so just ignore it. */
6153 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6154 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6155 + 511,
6156 &Marker.uSector, 1);
6157 break;
6158 case VMDK_MARKER_GT:
6159 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6160 break;
6161 case VMDK_MARKER_GD:
6162 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6163 break;
6164 case VMDK_MARKER_FOOTER:
6165 uGrainSectorAbs += 2;
6166 break;
6167 case VMDK_MARKER_UNSPECIFIED:
6168 /* Skip over the contents of the unspecified marker
6169 * type 4 which exists in some vSphere created files. */
6170 /** @todo figure out what the payload means. */
6171 uGrainSectorAbs += 1;
6172 break;
6173 default:
6174 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6175 pExtent->uGrainSectorAbs = 0;
6176 return VERR_VD_VMDK_INVALID_STATE;
6177 }
6178 pExtent->cbGrainStreamRead = 0;
6179 }
6180 else
6181 {
6182 /* A compressed grain marker. If it is at/after what we're
6183 * interested in read and decompress data. */
6184 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6185 {
6186 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6187 continue;
6188 }
6189 uint64_t uLBA = 0;
6190 uint32_t cbGrainStreamRead = 0;
6191 rc = vmdkFileInflateSync(pImage, pExtent,
6192 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6193 pExtent->pvGrain,
6194 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6195 &Marker, &uLBA, &cbGrainStreamRead);
6196 if (RT_FAILURE(rc))
6197 {
6198 pExtent->uGrainSectorAbs = 0;
6199 return rc;
6200 }
6201 if ( pExtent->uGrain
6202 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6203 {
6204 pExtent->uGrainSectorAbs = 0;
6205 return VERR_VD_VMDK_INVALID_STATE;
6206 }
6207 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6208 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6209 break;
6210 }
6211 } while (Marker.uType != VMDK_MARKER_EOS);
6212 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6213 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6214 {
6215 pExtent->uGrain = UINT32_MAX;
6216 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6217 * the next read would try to get more data, and we're at EOF. */
6218 pExtent->cbGrainStreamRead = 1;
6219 }
6220 }
6221 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6222 {
6223 /* The next data block we have is not for this area, so just return
6224 * that there is no data. */
6225 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6226 return VERR_VD_BLOCK_FREE;
6227 }
6228 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6229 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6230 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6231 cbRead);
6232 LogFlowFunc(("returns VINF_SUCCESS\n"));
6233 return VINF_SUCCESS;
6234}
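/* Rough sketch of the on-disk stream a sequential reader walks through in a
 * streamOptimized extent (layout and ordering are illustrative, sizes rounded
 * up to 512-byte sectors):
 *   [grain marker: uSector(8) cbSize(4) | compressed grain data]   cbSize > 0
 *   [grain marker ...]                                             ...
 *   [GT marker:     cbSize == 0, uType == VMDK_MARKER_GT     | grain table]
 *   [GD marker:     cbSize == 0, uType == VMDK_MARKER_GD     | grain directory]
 *   [footer marker: cbSize == 0, uType == VMDK_MARKER_FOOTER | footer]
 *   [EOS marker:    cbSize == 0, uType == VMDK_MARKER_EOS]
 * Markers with cbSize == 0 are skipped above; with e.g. 512 GT entries a GT
 * marker plus its table occupies 1 + 4 sectors, matching the skip arithmetic. */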
6235/**
6236 * Replaces a fragment of a string with the specified string.
6237 *
6238 * @returns Pointer to the allocated UTF-8 string.
6239 * @param pszWhere UTF-8 string to search in.
6240 * @param pszWhat UTF-8 string to search for.
6241 * @param pszByWhat UTF-8 string to replace the found string with.
6242 *
6243 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6244 * for updating the base name in the descriptor, the second is for
6245 * generating new filenames for extents. This code borked when
6246 * RTPathAbs started correcting the drive letter case on Windows:
6247 * strstr failed because the old full name used as the search string
6248 * had not been subjected to RTPathAbs while pExtent->pszFullname had.
6249 * I fixed this by applying RTPathAbs to the places it wasn't applied.
6250 *
6251 * However, this highlights some undocumented ASSUMPTIONS as well as
6252 * terrible shortcomings of the approach.
6253 *
6254 * Given the right filename, it may also screw up the descriptor. Take
6255 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance:
6256 * we'll be asked to replace "Test0" with something, no problem. Now,
6257 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6258 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6259 * its bum. The descriptor string must be parsed and reconstructed,
6260 * the lazy strstr approach doesn't cut it.
6261 *
6262 * I'm also curious as to what would be the correct escaping of '"' in
6263 * the file name and how that is supposed to be handled; it either
6264 * needs to be handled or such names must be rejected in several
6265 * places (maybe they are, I didn't check).
6266 *
6267 * When this function is used to replace the start of a path, I think
6268 * the assumption from the prep/setup code is that we kind of know
6269 * what we're working on (I could be wrong). However, using strstr
6270 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6271 * Especially on unix systems, weird stuff could happen if someone
6272 * unwittingly tinkers with the prep/setup code. What should really be
6273 * done here is using a new RTPathStartEx function that (via flags)
6274 * allows matching a partial final component and returns the length of
6275 * what it matched (in case it skipped slashes and '.' components).
6276 *
6277 */
6278static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6279 const char *pszByWhat)
6280{
6281 AssertPtr(pszWhere);
6282 AssertPtr(pszWhat);
6283 AssertPtr(pszByWhat);
6284 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6285 if (!pszFoundStr)
6286 {
6287 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6288 return NULL;
6289 }
6290 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6291 char *pszNewStr = RTStrAlloc(cbFinal);
6292 if (pszNewStr)
6293 {
6294 char *pszTmp = pszNewStr;
6295 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6296 pszTmp += pszFoundStr - pszWhere;
6297 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6298 pszTmp += strlen(pszByWhat);
6299 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6300 }
6301 return pszNewStr;
6302}
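/* A minimal sketch of the kind of anchored replacement the @note above asks
 * for: it only touches the quoted filename of an extent descriptor line, so
 * tokens like "RW", "SPARSE" or the sector count can never be matched by
 * accident. The helper name and its use of strchr/RTStrAPrintf are illustrative
 * assumptions, not code used by this backend.
 */
#if 0 /* illustration only, not compiled */
static char *vmdkExampleReplaceQuotedName(const char *pszLine, const char *pszNewName)
{
    /* Locate the quoted filename portion of the descriptor line. */
    const char *pszOpen  = strchr(pszLine, '"');
    const char *pszClose = pszOpen ? strchr(pszOpen + 1, '"') : NULL;
    if (!pszOpen || !pszClose)
        return NULL; /* no quoted filename on this line */
    /* Rebuild the line: everything before the quotes, the new name, the tail. */
    char *pszResult = NULL;
    RTStrAPrintf(&pszResult, "%.*s\"%s\"%s",
                 (int)(pszOpen - pszLine), pszLine, pszNewName, pszClose + 1);
    return pszResult; /* NULL on allocation failure */
}
#endif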
6303/** @copydoc VDIMAGEBACKEND::pfnProbe */
6304static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6305 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6306{
6307 RT_NOREF(enmDesiredType);
6308 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6309 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6310 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6311 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6312
6313 int rc = VINF_SUCCESS;
6314 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6315 if (RT_LIKELY(pImage))
6316 {
6317 pImage->pszFilename = pszFilename;
6318 pImage->pFile = NULL;
6319 pImage->pExtents = NULL;
6320 pImage->pFiles = NULL;
6321 pImage->pGTCache = NULL;
6322 pImage->pDescData = NULL;
6323 pImage->pVDIfsDisk = pVDIfsDisk;
6324 pImage->pVDIfsImage = pVDIfsImage;
6325 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6326 * much as possible in vmdkOpenImage. */
6327 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6328 vmdkFreeImage(pImage, false, false /*fFlush*/);
6329 RTMemFree(pImage);
6330 if (RT_SUCCESS(rc))
6331 *penmType = VDTYPE_HDD;
6332 }
6333 else
6334 rc = VERR_NO_MEMORY;
6335 LogFlowFunc(("returns %Rrc\n", rc));
6336 return rc;
6337}
6338/** @copydoc VDIMAGEBACKEND::pfnOpen */
6339static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6340 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6341 VDTYPE enmType, void **ppBackendData)
6342{
6343 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6344 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6345 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6346 int rc;
6347 /* Check open flags. All valid flags are supported. */
6348 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6349 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6350 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6351
6352 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6353 if (RT_LIKELY(pImage))
6354 {
6355 pImage->pszFilename = pszFilename;
6356 pImage->pFile = NULL;
6357 pImage->pExtents = NULL;
6358 pImage->pFiles = NULL;
6359 pImage->pGTCache = NULL;
6360 pImage->pDescData = NULL;
6361 pImage->pVDIfsDisk = pVDIfsDisk;
6362 pImage->pVDIfsImage = pVDIfsImage;
6363 rc = vmdkOpenImage(pImage, uOpenFlags);
6364 if (RT_SUCCESS(rc))
6365 *ppBackendData = pImage;
6366 else
6367 RTMemFree(pImage);
6368 }
6369 else
6370 rc = VERR_NO_MEMORY;
6371 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6372 return rc;
6373}
6374/** @copydoc VDIMAGEBACKEND::pfnCreate */
6375static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
6376 unsigned uImageFlags, const char *pszComment,
6377 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6378 PCRTUUID pUuid, unsigned uOpenFlags,
6379 unsigned uPercentStart, unsigned uPercentSpan,
6380 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6381 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
6382 void **ppBackendData)
6383{
6384 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
6385 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
6386 int rc;
6387 /* Check the VD container type and image flags. */
6388 if ( enmType != VDTYPE_HDD
6389 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
6390 return VERR_VD_INVALID_TYPE;
6391 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
6392 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
6393 && ( !cbSize
6394 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
6395 return VERR_VD_INVALID_SIZE;
6396 /* Check image flags for invalid combinations. */
6397 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6398 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
6399 return VERR_INVALID_PARAMETER;
6400 /* Check open flags. All valid flags are supported. */
6401 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6402 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6403 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6404 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
6405 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
6406 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6407 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
6408 VERR_INVALID_PARAMETER);
6409 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6410 if (RT_LIKELY(pImage))
6411 {
6412 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
6413 pImage->pszFilename = pszFilename;
6414 pImage->pFile = NULL;
6415 pImage->pExtents = NULL;
6416 pImage->pFiles = NULL;
6417 pImage->pGTCache = NULL;
6418 pImage->pDescData = NULL;
6419 pImage->pVDIfsDisk = pVDIfsDisk;
6420 pImage->pVDIfsImage = pVDIfsImage;
6421 /* Descriptors for split images can be pretty large, especially if the
6422 * filename is long. So prepare for the worst, and allocate quite some
6423 * memory for the descriptor in this case. */
6424 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6425 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6426 else
6427 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6428 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6429 if (RT_LIKELY(pImage->pDescData))
6430 {
6431 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6432 pPCHSGeometry, pLCHSGeometry, pUuid,
6433 pIfProgress, uPercentStart, uPercentSpan);
6434 if (RT_SUCCESS(rc))
6435 {
6436 /* So far the image is opened in read/write mode. Make sure the
6437 * image is opened in read-only mode if the caller requested that. */
6438 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6439 {
6440 vmdkFreeImage(pImage, false, true /*fFlush*/);
6441 rc = vmdkOpenImage(pImage, uOpenFlags);
6442 }
6443 if (RT_SUCCESS(rc))
6444 *ppBackendData = pImage;
6445 }
6446 if (RT_FAILURE(rc))
6447 RTMemFree(pImage->pDescData);
6448 }
6449 else
6450 rc = VERR_NO_MEMORY;
6451 if (RT_FAILURE(rc))
6452 RTMemFree(pImage);
6453 }
6454 else
6455 rc = VERR_NO_MEMORY;
6456 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6457 return rc;
6458}
6459/**
6460 * Prepares the state for renaming a VMDK image, setting up the state and allocating
6461 * memory.
6462 *
6463 * @returns VBox status code.
6464 * @param pImage VMDK image instance.
6465 * @param pRenameState The state to initialize.
6466 * @param pszFilename The new filename.
6467 */
6468static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6469{
6470 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
6471 int rc = VINF_SUCCESS;
6472 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
6473 /*
6474 * Allocate an array to store both old and new names of renamed files
6475 * in case we have to roll back the changes. Arrays are initialized
6476 * with zeros. We actually save stuff when and if we change it.
6477 */
6478 pRenameState->cExtents = pImage->cExtents;
6479 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6480 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6481 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
6482 if ( pRenameState->apszOldName
6483 && pRenameState->apszNewName
6484 && pRenameState->apszNewLines)
6485 {
6486 /* Save the descriptor size and position. */
6487 if (pImage->pDescData)
6488 {
6489 /* Separate descriptor file. */
6490 pRenameState->fEmbeddedDesc = false;
6491 }
6492 else
6493 {
6494 /* Embedded descriptor file. */
6495 pRenameState->ExtentCopy = pImage->pExtents[0];
6496 pRenameState->fEmbeddedDesc = true;
6497 }
6498 /* Save the descriptor content. */
6499 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
6500 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6501 {
6502 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
6503 if (!pRenameState->DescriptorCopy.aLines[i])
6504 {
6505 rc = VERR_NO_MEMORY;
6506 break;
6507 }
6508 }
6509 if (RT_SUCCESS(rc))
6510 {
6511 /* Prepare both old and new base names used for string replacement. */
6512 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
6513 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
6514 RTPathStripSuffix(pRenameState->pszNewBaseName);
6515 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
6516 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
6517 RTPathStripSuffix(pRenameState->pszOldBaseName);
6518 /* Prepare both old and new full names used for string replacement.
6519 Note! Must abspath the stuff here, so the strstr weirdness later in
6520 the renaming process gets a match against abspath'ed extent paths.
6521 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
6522 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
6523 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
6524 RTPathStripSuffix(pRenameState->pszNewFullName);
6525 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
6526 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
6527 RTPathStripSuffix(pRenameState->pszOldFullName);
6528 /* Save the old name for easy access to the old descriptor file. */
6529 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
6530 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
6531 /* Save old image name. */
6532 pRenameState->pszOldImageName = pImage->pszFilename;
6533 }
6534 }
6535 else
6536 rc = VERR_NO_TMP_MEMORY;
6537 return rc;
6538}
6539/**
6540 * Destroys the given rename state, freeing all allocated memory.
6541 *
6542 * @returns nothing.
6543 * @param pRenameState The rename state to destroy.
6544 */
6545static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6546{
6547 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6548 if (pRenameState->DescriptorCopy.aLines[i])
6549 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6550 if (pRenameState->apszOldName)
6551 {
6552 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6553 if (pRenameState->apszOldName[i])
6554 RTStrFree(pRenameState->apszOldName[i]);
6555 RTMemTmpFree(pRenameState->apszOldName);
6556 }
6557 if (pRenameState->apszNewName)
6558 {
6559 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6560 if (pRenameState->apszNewName[i])
6561 RTStrFree(pRenameState->apszNewName[i]);
6562 RTMemTmpFree(pRenameState->apszNewName);
6563 }
6564 if (pRenameState->apszNewLines)
6565 {
6566 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6567 if (pRenameState->apszNewLines[i])
6568 RTStrFree(pRenameState->apszNewLines[i]);
6569 RTMemTmpFree(pRenameState->apszNewLines);
6570 }
6571 if (pRenameState->pszOldDescName)
6572 RTStrFree(pRenameState->pszOldDescName);
6573 if (pRenameState->pszOldBaseName)
6574 RTStrFree(pRenameState->pszOldBaseName);
6575 if (pRenameState->pszNewBaseName)
6576 RTStrFree(pRenameState->pszNewBaseName);
6577 if (pRenameState->pszOldFullName)
6578 RTStrFree(pRenameState->pszOldFullName);
6579 if (pRenameState->pszNewFullName)
6580 RTStrFree(pRenameState->pszNewFullName);
6581}
6582/**
6583 * Rolls back the rename operation to the original state.
6584 *
6585 * @returns VBox status code.
6586 * @param pImage VMDK image instance.
6587 * @param pRenameState The rename state.
6588 */
6589static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
6590{
6591 int rc = VINF_SUCCESS;
6592 if (!pRenameState->fImageFreed)
6593 {
6594 /*
6595 * Some extents may have been closed, close the rest. We will
6596 * re-open the whole thing later.
6597 */
6598 vmdkFreeImage(pImage, false, true /*fFlush*/);
6599 }
6600 /* Rename files back. */
6601 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6602 {
6603 if (pRenameState->apszOldName[i])
6604 {
6605 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
6606 AssertRC(rc);
6607 }
6608 }
6609 /* Restore the old descriptor. */
6610 PVMDKFILE pFile;
6611 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
6612 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
6613 false /* fCreate */));
6614 AssertRC(rc);
6615 if (pRenameState->fEmbeddedDesc)
6616 {
6617 pRenameState->ExtentCopy.pFile = pFile;
6618 pImage->pExtents = &pRenameState->ExtentCopy;
6619 }
6620 else
6621 {
6622 /* Shouldn't be NULL for a separate descriptor; the actual
6623 * content is never accessed, it merely needs to be non-NULL.
6624 */
6625 pImage->pDescData = pRenameState->pszOldDescName;
6626 pImage->pFile = pFile;
6627 }
6628 pImage->Descriptor = pRenameState->DescriptorCopy;
6629 vmdkWriteDescriptor(pImage, NULL);
6630 vmdkFileClose(pImage, &pFile, false);
6631 /* Get rid of the stuff we implanted. */
6632 pImage->pExtents = NULL;
6633 pImage->pFile = NULL;
6634 pImage->pDescData = NULL;
6635 /* Re-open the image. */
6636 pImage->pszFilename = pRenameState->pszOldImageName;
6637 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6638 return rc;
6639}
6640/**
6641 * Rename worker doing the real work.
6642 *
6643 * @returns VBox status code.
6644 * @param pImage VMDK image instance.
6645 * @param pRenameState The rename state.
6646 * @param pszFilename The new filename.
6647 */
6648static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6649{
6650 int rc = VINF_SUCCESS;
6651 unsigned i, line;
6652 /* Update the descriptor with modified extent names. */
6653 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6654 i < pRenameState->cExtents;
6655 i++, line = pImage->Descriptor.aNextLines[line])
6656 {
6657 /* Update the descriptor. */
6658 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6659 pRenameState->pszOldBaseName,
6660 pRenameState->pszNewBaseName);
6661 if (!pRenameState->apszNewLines[i])
6662 {
6663 rc = VERR_NO_MEMORY;
6664 break;
6665 }
6666 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6667 }
6668 if (RT_SUCCESS(rc))
6669 {
6670 /* Make sure the descriptor gets written back. */
6671 pImage->Descriptor.fDirty = true;
6672 /* Flush the descriptor now, in case it is embedded. */
6673 vmdkFlushImage(pImage, NULL);
6674 /* Close and rename/move extents. */
6675 for (i = 0; i < pRenameState->cExtents; i++)
6676 {
6677 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6678 /* Compose new name for the extent. */
6679 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6680 pRenameState->pszOldFullName,
6681 pRenameState->pszNewFullName);
6682 if (!pRenameState->apszNewName[i])
6683 {
6684 rc = VERR_NO_MEMORY;
6685 break;
6686 }
6687 /* Close the extent file. */
6688 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6689 if (RT_FAILURE(rc))
6690 break;
6691 /* Rename the extent file. */
6692 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6693 if (RT_FAILURE(rc))
6694 break;
6695 /* Remember the old name. */
6696 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6697 }
6698 if (RT_SUCCESS(rc))
6699 {
6700 /* Release all old stuff. */
6701 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6702 if (RT_SUCCESS(rc))
6703 {
6704 pRenameState->fImageFreed = true;
6705 /* The last elements of the new/old name arrays are reserved for
6706 * storing the descriptor's names.
6707 */
6708 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6709 /* Rename the descriptor file if it's separate. */
6710 if (!pRenameState->fEmbeddedDesc)
6711 {
6712 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6713 if (RT_SUCCESS(rc))
6714 {
6715 /* Save old name only if we may need to change it back. */
6716 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6717 }
6718 }
6719 /* Update pImage with the new information. */
6720 pImage->pszFilename = pszFilename;
6721 /* Open the new image. */
6722 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6723 }
6724 }
6725 }
6726 return rc;
6727}
6728/** @copydoc VDIMAGEBACKEND::pfnRename */
6729static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6730{
6731 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6732 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6733 VMDKRENAMESTATE RenameState;
6734 memset(&RenameState, 0, sizeof(RenameState));
6735 /* Check arguments. */
6736 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6737 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6738 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6739 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
6740 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6741 if (RT_SUCCESS(rc))
6742 {
6743 /* --- Up to this point we have not done any damage yet. --- */
6744 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6745 /* Roll back all changes in case of failure. */
6746 if (RT_FAILURE(rc))
6747 {
6748 int rrc = vmdkRenameRollback(pImage, &RenameState);
6749 AssertRC(rrc);
6750 }
6751 }
6752 vmdkRenameStateDestroy(&RenameState);
6753 LogFlowFunc(("returns %Rrc\n", rc));
6754 return rc;
6755}
6756/** @copydoc VDIMAGEBACKEND::pfnClose */
6757static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6758{
6759 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6760 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6761 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6762 RTMemFree(pImage);
6763 LogFlowFunc(("returns %Rrc\n", rc));
6764 return rc;
6765}
6766/** @copydoc VDIMAGEBACKEND::pfnRead */
6767static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
6768 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
6769{
6770 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
6771 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
6772 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6773 AssertPtr(pImage);
6774 Assert(uOffset % 512 == 0);
6775 Assert(cbToRead % 512 == 0);
6776 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
6777 AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
6778 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
6779 /* Find the extent and check access permissions as defined in the extent descriptor. */
6780 PVMDKEXTENT pExtent;
6781 uint64_t uSectorExtentRel;
6782 int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6783 &pExtent, &uSectorExtentRel);
6784 if ( RT_SUCCESS(rc)
6785 && pExtent->enmAccess != VMDKACCESS_NOACCESS)
6786 {
6787 /* Clip read range to remain in this extent. */
6788 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6789 /* Handle the read according to the current extent type. */
6790 switch (pExtent->enmType)
6791 {
6792 case VMDKETYPE_HOSTED_SPARSE:
6793 {
6794 uint64_t uSectorExtentAbs;
6795 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
6796 if (RT_FAILURE(rc))
6797 break;
6798 /* Clip read range to at most the rest of the grain. */
6799 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
6800 Assert(!(cbToRead % 512));
6801 if (uSectorExtentAbs == 0)
6802 {
6803 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6804 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6805 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
6806 rc = VERR_VD_BLOCK_FREE;
6807 else
6808 rc = vmdkStreamReadSequential(pImage, pExtent,
6809 uSectorExtentRel,
6810 pIoCtx, cbToRead);
6811 }
6812 else
6813 {
6814 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6815 {
6816 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6817 ("Async I/O is not supported for stream optimized VMDK's\n"));
6818 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
6819 uSectorExtentAbs -= uSectorInGrain;
6820 if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
6821 {
6822 uint64_t uLBA = 0; /* gcc maybe uninitialized */
6823 rc = vmdkFileInflateSync(pImage, pExtent,
6824 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6825 pExtent->pvGrain,
6826 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6827 NULL, &uLBA, NULL);
6828 if (RT_FAILURE(rc))
6829 {
6830 pExtent->uGrainSectorAbs = 0;
6831 break;
6832 }
6833 pExtent->uGrainSectorAbs = uSectorExtentAbs;
6834 pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
6835 Assert(uLBA == uSectorExtentRel);
6836 }
6837 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6838 (uint8_t *)pExtent->pvGrain
6839 + VMDK_SECTOR2BYTE(uSectorInGrain),
6840 cbToRead);
6841 }
6842 else
6843 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6844 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6845 pIoCtx, cbToRead);
6846 }
6847 break;
6848 }
6849 case VMDKETYPE_VMFS:
6850 case VMDKETYPE_FLAT:
6851 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6852 VMDK_SECTOR2BYTE(uSectorExtentRel),
6853 pIoCtx, cbToRead);
6854 break;
6855 case VMDKETYPE_ZERO:
6856 {
6857 size_t cbSet;
6858 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
6859 Assert(cbSet == cbToRead);
6860 break;
6861 }
6862 }
6863 if (pcbActuallyRead)
6864 *pcbActuallyRead = cbToRead;
6865 }
6866 else if (RT_SUCCESS(rc))
6867 rc = VERR_VD_VMDK_INVALID_STATE;
6868 LogFlowFunc(("returns %Rrc\n", rc));
6869 return rc;
6870}
6871/** @copydoc VDIMAGEBACKEND::pfnWrite */
6872static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
6873 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
6874 size_t *pcbPostRead, unsigned fWrite)
6875{
6876 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
6877 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
6878 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6879 int rc;
6880 AssertPtr(pImage);
6881 Assert(uOffset % 512 == 0);
6882 Assert(cbToWrite % 512 == 0);
6883 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
6884 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
6885 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6886 {
6887 PVMDKEXTENT pExtent;
6888 uint64_t uSectorExtentRel;
6889 uint64_t uSectorExtentAbs;
6890 /* No size check here, will do that later when the extent is located.
6891 * There are sparse images out there which according to the spec are
6892 * invalid, because the total size is not a multiple of the grain size.
6893 * Also for sparse images which are stitched together in odd ways (not at
6894 * grain boundaries, and with the nominal size not being a multiple of the
6895 * grain size), this would prevent writing to the last grain. */
6896 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6897 &pExtent, &uSectorExtentRel);
6898 if (RT_SUCCESS(rc))
6899 {
6900 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
6901 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6902 && !pImage->pExtents[0].uAppendPosition
6903 && pExtent->enmAccess != VMDKACCESS_READONLY))
6904 rc = VERR_VD_VMDK_INVALID_STATE;
6905 else
6906 {
6907 /* Handle the write according to the current extent type. */
6908 switch (pExtent->enmType)
6909 {
6910 case VMDKETYPE_HOSTED_SPARSE:
6911 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
6912 if (RT_SUCCESS(rc))
6913 {
6914 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
6915 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
6916 rc = VERR_VD_VMDK_INVALID_WRITE;
6917 else
6918 {
6919 /* Clip write range to at most the rest of the grain. */
6920 cbToWrite = RT_MIN(cbToWrite,
6921 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
6922 - uSectorExtentRel % pExtent->cSectorsPerGrain));
6923 if (uSectorExtentAbs == 0)
6924 {
6925 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6926 {
6927 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6928 {
6929 /* Full block write to a previously unallocated block.
6930 * Check if the caller wants to avoid the automatic alloc. */
6931 if (!(fWrite & VD_WRITE_NO_ALLOC))
6932 {
6933 /* Allocate GT and find out where to store the grain. */
6934 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
6935 uSectorExtentRel, cbToWrite);
6936 }
6937 else
6938 rc = VERR_VD_BLOCK_FREE;
6939 *pcbPreRead = 0;
6940 *pcbPostRead = 0;
6941 }
6942 else
6943 {
6944 /* Clip write range to remain in this extent. */
6945 cbToWrite = RT_MIN(cbToWrite,
6946 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
6947 + pExtent->cNominalSectors - uSectorExtentRel));
6948 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
6949 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
6950 rc = VERR_VD_BLOCK_FREE;
6951 }
6952 }
6953 else
6954 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
6955 pIoCtx, cbToWrite);
6956 }
6957 else
6958 {
6959 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6960 {
6961 /* A partial write to a streamOptimized image is simply
6962 * invalid. It requires rewriting already compressed data
6963 * which is somewhere between expensive and impossible. */
6964 rc = VERR_VD_VMDK_INVALID_STATE;
6965 pExtent->uGrainSectorAbs = 0;
6966 AssertRC(rc);
6967 }
6968 else
6969 {
6970 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
6971 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6972 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6973 pIoCtx, cbToWrite, NULL, NULL);
6974 }
6975 }
6976 }
6977 }
6978 break;
6979 case VMDKETYPE_VMFS:
6980 case VMDKETYPE_FLAT:
6981 /* Clip write range to remain in this extent. */
6982 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6983 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6984 VMDK_SECTOR2BYTE(uSectorExtentRel),
6985 pIoCtx, cbToWrite, NULL, NULL);
6986 break;
6987 case VMDKETYPE_ZERO:
6988 /* Clip write range to remain in this extent. */
6989 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6990 break;
6991 }
6992 }
6993 if (pcbWriteProcess)
6994 *pcbWriteProcess = cbToWrite;
6995 }
6996 }
6997 else
6998 rc = VERR_VD_IMAGE_READ_ONLY;
6999 LogFlowFunc(("returns %Rrc\n", rc));
7000 return rc;
7001}
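/* Worked example of the pcbPreRead/pcbPostRead values vmdkWrite() hands back
 * for a partial write into an unallocated grain, using an illustrative grain
 * size of 128 sectors (64 KiB): a 4 KiB write starting at extent sector 260
 * falls into the grain covering sectors 256..383, so the caller gets
 *   *pcbPreRead  = (260 % 128) * 512       = 2 KiB  (sectors 256..259)
 *   *pcbPostRead = 64 KiB - 4 KiB - 2 KiB  = 58 KiB (sectors 268..383)
 * together with VERR_VD_BLOCK_FREE, telling it to do the read-modify-write. */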
7002/** @copydoc VDIMAGEBACKEND::pfnFlush */
7003static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7004{
7005 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7006 return vmdkFlushImage(pImage, pIoCtx);
7007}
7008/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7009static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7010{
7011 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7012 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7013 AssertPtrReturn(pImage, 0);
7014 return VMDK_IMAGE_VERSION;
7015}
7016/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7017static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7018{
7019 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7020 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7021 uint64_t cb = 0;
7022 AssertPtrReturn(pImage, 0);
7023 if (pImage->pFile != NULL)
7024 {
7025 uint64_t cbFile;
7026 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7027 if (RT_SUCCESS(rc))
7028 cb += cbFile;
7029 }
7030 for (unsigned i = 0; i < pImage->cExtents; i++)
7031 {
7032 if (pImage->pExtents[i].pFile != NULL)
7033 {
7034 uint64_t cbFile;
7035 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7036 if (RT_SUCCESS(rc))
7037 cb += cbFile;
7038 }
7039 }
7040 LogFlowFunc(("returns %lld\n", cb));
7041 return cb;
7042}
7043/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7044static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7045{
7046 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7047 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7048 int rc = VINF_SUCCESS;
7049 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7050 if (pImage->PCHSGeometry.cCylinders)
7051 *pPCHSGeometry = pImage->PCHSGeometry;
7052 else
7053 rc = VERR_VD_GEOMETRY_NOT_SET;
7054 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7055 return rc;
7056}
7057/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7058static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7059{
7060 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7061 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7062 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7063 int rc = VINF_SUCCESS;
7064 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7065 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7066 {
7067 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7068 {
7069 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7070 if (RT_SUCCESS(rc))
7071 pImage->PCHSGeometry = *pPCHSGeometry;
7072 }
7073 else
7074 rc = VERR_NOT_SUPPORTED;
7075 }
7076 else
7077 rc = VERR_VD_IMAGE_READ_ONLY;
7078 LogFlowFunc(("returns %Rrc\n", rc));
7079 return rc;
7080}
7081/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7082static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7083{
7084 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7085 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7086 int rc = VINF_SUCCESS;
7087 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7088 if (pImage->LCHSGeometry.cCylinders)
7089 *pLCHSGeometry = pImage->LCHSGeometry;
7090 else
7091 rc = VERR_VD_GEOMETRY_NOT_SET;
7092 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7093 return rc;
7094}
/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
{
    LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
                 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
        {
            rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
            if (RT_SUCCESS(rc))
                pImage->LCHSGeometry = *pLCHSGeometry;
        }
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

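/* The region list handed out below is a member of the image structure and
   stays owned by it; this is why the matching release callback is a no-op. */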
/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
{
    LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
    PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
    *ppRegionList = &pThis->RegionList;
    LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
    return VINF_SUCCESS;
}

/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
{
    RT_NOREF1(pRegionList);
    LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
    PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
    AssertPtr(pThis); RT_NOREF(pThis);
    /* Nothing to do here. */
}

/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
{
    LogFlowFunc(("pBackendData=%#p\n", pBackendData));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, 0);
    LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
    return pImage->uImageFlags;
}

/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
{
    LogFlowFunc(("pBackendData=%#p\n", pBackendData));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, 0);
    LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
    return pImage->uOpenFlags;
}

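/* Changing the open flags is implemented by closing and reopening the image
   with the new flags. Stream optimized images cannot be reopened, so for them
   only a request that matches the currently active flags succeeds. */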
/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
{
    LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;
    /* Image must be opened and the new flags must be valid. */
    if (!pImage || (uOpenFlags & ~(  VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
                                   | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
                                   | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
        rc = VERR_INVALID_PARAMETER;
    else
    {
        /* StreamOptimized images need special treatment: reopen is prohibited. */
        if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            if (pImage->uOpenFlags == uOpenFlags)
                rc = VINF_SUCCESS;
            else
                rc = VERR_INVALID_PARAMETER;
        }
        else
        {
            /* Implement this operation via reopening the image. */
            vmdkFreeImage(pImage, false, true /*fFlush*/);
            rc = vmdkOpenImage(pImage, uOpenFlags);
        }
    }
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnGetComment */
static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
{
    LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    char *pszCommentEncoded = NULL;
    int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
                               "ddb.comment", &pszCommentEncoded);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        pszCommentEncoded = NULL;
        rc = VINF_SUCCESS;
    }
    if (RT_SUCCESS(rc))
    {
        if (pszComment && pszCommentEncoded)
            rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
        else if (pszComment)
            *pszComment = '\0';
        if (pszCommentEncoded)
            RTMemTmpFree(pszCommentEncoded);
    }
    LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnSetComment */
static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
{
    LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
            rc = vmdkSetImageComment(pImage, pszComment);
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    *pUuid = pImage->ImageUuid;
    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
    return VINF_SUCCESS;
}

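/* The UUID setters below update both the in-memory copy and the corresponding
   ddb entry of the descriptor; they are rejected for read-only and stream
   optimized images. */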
/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
        {
            pImage->ImageUuid = *pUuid;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, pUuid);
            if (RT_FAILURE(rc))
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                               N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    *pUuid = pImage->ModificationUuid;
    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
    return VINF_SUCCESS;
}

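/* The modification UUID is only written back to the descriptor when it
   actually differs from the stored value, avoiding needless descriptor
   updates. */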
/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
        {
            /* Only touch the modification uuid if it changed. */
            if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
            {
                pImage->ModificationUuid = *pUuid;
                rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                        VMDK_DDB_MODIFICATION_UUID, pUuid);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
            }
        }
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    *pUuid = pImage->ParentUuid;
    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
    return VINF_SUCCESS;
}

/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
        {
            pImage->ParentUuid = *pUuid;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, pUuid);
            if (RT_FAILURE(rc))
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                               N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    *pUuid = pImage->ParentModificationUuid;
    LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
    return VINF_SUCCESS;
}

/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
{
    LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
        {
            pImage->ParentModificationUuid = *pUuid;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
            if (RT_FAILURE(rc))
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NOT_SUPPORTED;
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}

/** @copydoc VDIMAGEBACKEND::pfnDump */
static DECLCALLBACK(void) vmdkDump(void *pBackendData)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtrReturnVoid(pImage);
    vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
                     pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
                     pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
                     VMDK_BYTE2SECTOR(pImage->cbSize));
    vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
    vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
    vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
    vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
}

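/* Backend descriptor handed to the VD layer. Entries set to NULL mark
   optional operations (discard, timestamps, parent filename, compact, resize,
   repair, metadata traversal) which this backend does not implement. */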
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
      VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};