VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 87159

Last change on this file since 87159 was 87055, checked in by vboxsync, 4 years ago

Main: bugref:9224: Fixed scm errors

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 333.7 KB
Line 
1/* $Id: VMDK.cpp 87055 2020-12-08 14:20:23Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5/*
6 * Copyright (C) 2006-2020 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16
17
18/*********************************************************************************************************************************
19* Header Files *
20*********************************************************************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/log.h> /* before VBox/vd-ifs.h */
23#include <VBox/vd-plugin.h>
24#include <VBox/err.h>
25#include <iprt/assert.h>
26#include <iprt/alloc.h>
27#include <iprt/base64.h>
28#include <iprt/ctype.h>
29#include <iprt/crc.h>
30#include <iprt/dvm.h>
31#include <iprt/uuid.h>
32#include <iprt/path.h>
33#include <iprt/rand.h>
34#include <iprt/string.h>
35#include <iprt/sort.h>
36#include <iprt/zip.h>
37#include <iprt/asm.h>
38#ifdef RT_OS_WINDOWS
39# include <iprt/utf16.h>
40# include <iprt/uni.h>
41# include <iprt/uni.h>
42# include <iprt/nt/nt-and-windows.h>
43# include <winioctl.h>
44#endif
45#ifdef RT_OS_LINUX
46# include <errno.h>
47# include <sys/stat.h>
48# include <iprt/dir.h>
49# include <iprt/symlink.h>
50# include <iprt/linux/sysfs.h>
51#endif
52#ifdef RT_OS_FREEBSD
53#include <libgeom.h>
54#include <sys/stat.h>
55#include <stdlib.h>
56#endif
57#ifdef RT_OS_SOLARIS
58#include <sys/dkio.h>
59#include <sys/vtoc.h>
60#include <sys/efi_partition.h>
61#include <unistd.h>
62#include <errno.h>
63#endif
64#include "VDBackends.h"
65
66
67/*********************************************************************************************************************************
68* Constants And Macros, Structures and Typedefs *
69*********************************************************************************************************************************/
70/** Maximum encoded string size (including NUL) we allow for VMDK images.
71 * Deliberately not set high to avoid running out of descriptor space. */
72#define VMDK_ENCODED_COMMENT_MAX 1024
73/** VMDK descriptor DDB entry for PCHS cylinders. */
74#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
75/** VMDK descriptor DDB entry for PCHS heads. */
76#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
77/** VMDK descriptor DDB entry for PCHS sectors. */
78#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
79/** VMDK descriptor DDB entry for LCHS cylinders. */
80#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
81/** VMDK descriptor DDB entry for LCHS heads. */
82#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
83/** VMDK descriptor DDB entry for LCHS sectors. */
84#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
85/** VMDK descriptor DDB entry for image UUID. */
86#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
87/** VMDK descriptor DDB entry for image modification UUID. */
88#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
89/** VMDK descriptor DDB entry for parent image UUID. */
90#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
91/** VMDK descriptor DDB entry for parent image modification UUID. */
92#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
93/** No compression for streamOptimized files. */
94#define VMDK_COMPRESSION_NONE 0
95/** Deflate compression for streamOptimized files. */
96#define VMDK_COMPRESSION_DEFLATE 1
97/** Marker that the actual GD value is stored in the footer. */
98#define VMDK_GD_AT_END 0xffffffffffffffffULL
99/** Marker for end-of-stream in streamOptimized images. */
100#define VMDK_MARKER_EOS 0
101/** Marker for grain table block in streamOptimized images. */
102#define VMDK_MARKER_GT 1
103/** Marker for grain directory block in streamOptimized images. */
104#define VMDK_MARKER_GD 2
105/** Marker for footer in streamOptimized images. */
106#define VMDK_MARKER_FOOTER 3
107/** Marker for unknown purpose in streamOptimized images.
108 * Shows up in very recent images created by vSphere, but only sporadically.
109 * They "forgot" to document that one in the VMDK specification. */
110#define VMDK_MARKER_UNSPECIFIED 4
111/** Dummy marker for "don't check the marker value". */
112#define VMDK_MARKER_IGNORE 0xffffffffU
113/**
114 * Magic number for hosted images created by VMware Workstation 4, VMware
115 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
116 */
117#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * The structure is byte-packed and, including the pad member, exactly
 * 512 bytes (one sector) in size.  Multi-byte fields are presumably stored
 * little-endian on disk per the VMDK spec (the marker handling in this file
 * converts with RT_LE2H_*/RT_H2LE_*) - confirm against the specification.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Expected to hold VMDK_SPARSE_MAGICNUMBER ('VMDK'). */
    uint32_t magicNumber;
    uint32_t version;
    uint32_t flags;
    uint64_t capacity;
    uint64_t grainSize;
    uint64_t descriptorOffset;
    uint64_t descriptorSize;
    uint32_t numGTEsPerGT;
    /** Redundant grain directory offset. */
    uint64_t rgdOffset;
    /** Grain directory offset. */
    uint64_t gdOffset;
    uint64_t overHead;
    bool uncleanShutdown;
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding bringing the total structure size to 512 bytes. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
145/** The maximum allowed descriptor size in the extent header in sectors. */
146#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
147/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
148 * divisible by the default grain size (64K) */
149#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
150/** VMDK streamOptimized file format marker. The type field may or may not
151 * be actually valid, but there's always data to read there. */
152#pragma pack(1)
153typedef struct VMDKMARKER
154{
155 uint64_t uSector;
156 uint32_t cbSize;
157 uint32_t uType;
158} VMDKMARKER, *PVMDKMARKER;
159#pragma pack()
160/** Convert sector number/size to byte offset/size. */
161#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
162/** Convert byte offset/size to sector number/size. */
163#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
164/**
165 * VMDK extent type.
166 */
167typedef enum VMDKETYPE
168{
169 /** Hosted sparse extent. */
170 VMDKETYPE_HOSTED_SPARSE = 1,
171 /** Flat extent. */
172 VMDKETYPE_FLAT,
173 /** Zero extent. */
174 VMDKETYPE_ZERO,
175 /** VMFS extent, used by ESX. */
176 VMDKETYPE_VMFS
177} VMDKETYPE, *PVMDKETYPE;
178/**
179 * VMDK access type for a extent.
180 */
181typedef enum VMDKACCESS
182{
183 /** No access allowed. */
184 VMDKACCESS_NOACCESS = 0,
185 /** Read-only access. */
186 VMDKACCESS_READONLY,
187 /** Read-write access. */
188 VMDKACCESS_READWRITE
189} VMDKACCESS, *PVMDKACCESS;
190/** Forward declaration for PVMDKIMAGE. */
191typedef struct VMDKIMAGE *PVMDKIMAGE;
192/**
193 * Extents files entry. Used for opening a particular file only once.
194 */
195typedef struct VMDKFILE
196{
197 /** Pointer to file path. Local copy. */
198 const char *pszFilename;
199 /** Pointer to base name. Local copy. */
200 const char *pszBasename;
201 /** File open flags for consistency checking. */
202 unsigned fOpen;
203 /** Handle for sync/async file abstraction.*/
204 PVDIOSTORAGE pStorage;
205 /** Reference counter. */
206 unsigned uReferences;
207 /** Flag whether the file should be deleted on last close. */
208 bool fDelete;
209 /** Pointer to the image we belong to (for debugging purposes). */
210 PVMDKIMAGE pImage;
211 /** Pointer to next file descriptor. */
212 struct VMDKFILE *pNext;
213 /** Pointer to the previous file descriptor. */
214 struct VMDKFILE *pPrev;
215} VMDKFILE, *PVMDKFILE;
216/**
217 * VMDK extent data structure.
218 */
219typedef struct VMDKEXTENT
220{
221 /** File handle. */
222 PVMDKFILE pFile;
223 /** Base name of the image extent. */
224 const char *pszBasename;
225 /** Full name of the image extent. */
226 const char *pszFullname;
227 /** Number of sectors in this extent. */
228 uint64_t cSectors;
229 /** Number of sectors per block (grain in VMDK speak). */
230 uint64_t cSectorsPerGrain;
231 /** Starting sector number of descriptor. */
232 uint64_t uDescriptorSector;
233 /** Size of descriptor in sectors. */
234 uint64_t cDescriptorSectors;
235 /** Starting sector number of grain directory. */
236 uint64_t uSectorGD;
237 /** Starting sector number of redundant grain directory. */
238 uint64_t uSectorRGD;
239 /** Total number of metadata sectors. */
240 uint64_t cOverheadSectors;
241 /** Nominal size (i.e. as described by the descriptor) of this extent. */
242 uint64_t cNominalSectors;
243 /** Sector offset (i.e. as described by the descriptor) of this extent. */
244 uint64_t uSectorOffset;
245 /** Number of entries in a grain table. */
246 uint32_t cGTEntries;
247 /** Number of sectors reachable via a grain directory entry. */
248 uint32_t cSectorsPerGDE;
249 /** Number of entries in the grain directory. */
250 uint32_t cGDEntries;
251 /** Pointer to the next free sector. Legacy information. Do not use. */
252 uint32_t uFreeSector;
253 /** Number of this extent in the list of images. */
254 uint32_t uExtent;
255 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
256 char *pDescData;
257 /** Pointer to the grain directory. */
258 uint32_t *pGD;
259 /** Pointer to the redundant grain directory. */
260 uint32_t *pRGD;
261 /** VMDK version of this extent. 1=1.0/1.1 */
262 uint32_t uVersion;
263 /** Type of this extent. */
264 VMDKETYPE enmType;
265 /** Access to this extent. */
266 VMDKACCESS enmAccess;
267 /** Flag whether this extent is marked as unclean. */
268 bool fUncleanShutdown;
269 /** Flag whether the metadata in the extent header needs to be updated. */
270 bool fMetaDirty;
271 /** Flag whether there is a footer in this extent. */
272 bool fFooter;
273 /** Compression type for this extent. */
274 uint16_t uCompression;
275 /** Append position for writing new grain. Only for sparse extents. */
276 uint64_t uAppendPosition;
277 /** Last grain which was accessed. Only for streamOptimized extents. */
278 uint32_t uLastGrainAccess;
279 /** Starting sector corresponding to the grain buffer. */
280 uint32_t uGrainSectorAbs;
281 /** Grain number corresponding to the grain buffer. */
282 uint32_t uGrain;
283 /** Actual size of the compressed data, only valid for reading. */
284 uint32_t cbGrainStreamRead;
285 /** Size of compressed grain buffer for streamOptimized extents. */
286 size_t cbCompGrain;
287 /** Compressed grain buffer for streamOptimized extents, with marker. */
288 void *pvCompGrain;
289 /** Decompressed grain buffer for streamOptimized extents. */
290 void *pvGrain;
291 /** Reference to the image in which this extent is used. Do not use this
292 * on a regular basis to avoid passing pImage references to functions
293 * explicitly. */
294 struct VMDKIMAGE *pImage;
295} VMDKEXTENT, *PVMDKEXTENT;
296/**
297 * Grain table cache size. Allocated per image.
298 */
299#define VMDK_GT_CACHE_SIZE 256
300/**
301 * Grain table block size. Smaller than an actual grain table block to allow
302 * more grain table blocks to be cached without having to allocate excessive
303 * amounts of memory for the cache.
304 */
305#define VMDK_GT_CACHELINE_SIZE 128
306/**
307 * Maximum number of lines in a descriptor file. Not worth the effort of
308 * making it variable. Descriptor files are generally very short (~20 lines),
309 * with the exception of sparse files split in 2G chunks, which need for the
310 * maximum size (almost 2T) exactly 1025 lines for the disk database.
311 */
312#define VMDK_DESCRIPTOR_LINES_MAX 1100U
313/**
314 * Parsed descriptor information. Allows easy access and update of the
315 * descriptor (whether separate file or not). Free form text files suck.
316 */
317typedef struct VMDKDESCRIPTOR
318{
319 /** Line number of first entry of the disk descriptor. */
320 unsigned uFirstDesc;
321 /** Line number of first entry in the extent description. */
322 unsigned uFirstExtent;
323 /** Line number of first disk database entry. */
324 unsigned uFirstDDB;
325 /** Total number of lines. */
326 unsigned cLines;
327 /** Total amount of memory available for the descriptor. */
328 size_t cbDescAlloc;
329 /** Set if descriptor has been changed and not yet written to disk. */
330 bool fDirty;
331 /** Array of pointers to the data in the descriptor. */
332 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
333 /** Array of line indices pointing to the next non-comment line. */
334 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
335} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
336/**
337 * Cache entry for translating extent/sector to a sector number in that
338 * extent.
339 */
340typedef struct VMDKGTCACHEENTRY
341{
342 /** Extent number for which this entry is valid. */
343 uint32_t uExtent;
344 /** GT data block number. */
345 uint64_t uGTBlock;
346 /** Data part of the cache entry. */
347 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
348} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
349/**
350 * Cache data structure for blocks of grain table entries. For now this is a
351 * fixed size direct mapping cache, but this should be adapted to the size of
352 * the sparse image and maybe converted to a set-associative cache. The
353 * implementation below implements a write-through cache with write allocate.
354 */
355typedef struct VMDKGTCACHE
356{
357 /** Cache entries. */
358 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
359 /** Number of cache entries (currently unused). */
360 unsigned cEntries;
361} VMDKGTCACHE, *PVMDKGTCACHE;
362/**
363 * Complete VMDK image data structure. Mainly a collection of extents and a few
364 * extra global data fields.
365 */
366typedef struct VMDKIMAGE
367{
368 /** Image name. */
369 const char *pszFilename;
370 /** Descriptor file if applicable. */
371 PVMDKFILE pFile;
372 /** Pointer to the per-disk VD interface list. */
373 PVDINTERFACE pVDIfsDisk;
374 /** Pointer to the per-image VD interface list. */
375 PVDINTERFACE pVDIfsImage;
376 /** Error interface. */
377 PVDINTERFACEERROR pIfError;
378 /** I/O interface. */
379 PVDINTERFACEIOINT pIfIo;
380 /** Pointer to the image extents. */
381 PVMDKEXTENT pExtents;
382 /** Number of image extents. */
383 unsigned cExtents;
384 /** Pointer to the files list, for opening a file referenced multiple
385 * times only once (happens mainly with raw partition access). */
386 PVMDKFILE pFiles;
387 /**
388 * Pointer to an array of segment entries for async I/O.
389 * This is an optimization because the task number to submit is not known
390 * and allocating/freeing an array in the read/write functions every time
391 * is too expensive.
392 */
393 PPDMDATASEG paSegments;
394 /** Entries available in the segments array. */
395 unsigned cSegments;
396 /** Open flags passed by VBoxHD layer. */
397 unsigned uOpenFlags;
398 /** Image flags defined during creation or determined during open. */
399 unsigned uImageFlags;
400 /** Total size of the image. */
401 uint64_t cbSize;
402 /** Physical geometry of this image. */
403 VDGEOMETRY PCHSGeometry;
404 /** Logical geometry of this image. */
405 VDGEOMETRY LCHSGeometry;
406 /** Image UUID. */
407 RTUUID ImageUuid;
408 /** Image modification UUID. */
409 RTUUID ModificationUuid;
410 /** Parent image UUID. */
411 RTUUID ParentUuid;
412 /** Parent image modification UUID. */
413 RTUUID ParentModificationUuid;
414 /** Pointer to grain table cache, if this image contains sparse extents. */
415 PVMDKGTCACHE pGTCache;
416 /** Pointer to the descriptor (NULL if no separate descriptor file). */
417 char *pDescData;
418 /** Allocation size of the descriptor file. */
419 size_t cbDescAlloc;
420 /** Parsed descriptor file content. */
421 VMDKDESCRIPTOR Descriptor;
422 /** The static region list. */
423 VDREGIONLIST RegionList;
424} VMDKIMAGE;
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position within the compressed grain buffer.
     * Starts at -1; the compression helpers use the negative value to detect
     * the first invocation, where the zlib stream type byte is injected
     * (inflate) or stripped (deflate). */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
437/** Tracks async grain allocation. */
438typedef struct VMDKGRAINALLOCASYNC
439{
440 /** Flag whether the allocation failed. */
441 bool fIoErr;
442 /** Current number of transfers pending.
443 * If reached 0 and there is an error the old state is restored. */
444 unsigned cIoXfersPending;
445 /** Sector number */
446 uint64_t uSector;
447 /** Flag whether the grain table needs to be updated. */
448 bool fGTUpdateNeeded;
449 /** Extent the allocation happens. */
450 PVMDKEXTENT pExtent;
451 /** Position of the new grain, required for the grain table update. */
452 uint64_t uGrainOffset;
453 /** Grain table sector. */
454 uint64_t uGTSector;
455 /** Backup grain table sector. */
456 uint64_t uRGTSector;
457} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
/**
 * State information for vmdkRename() and helpers.
 */
typedef struct VMDKRENAMESTATE
{
    /** Array of old filenames. */
    char **apszOldName;
    /** Array of new filenames. */
    char **apszNewName;
    /** Array of new lines in the extent descriptor. */
    char **apszNewLines;
    /** Name of the old descriptor file if not a sparse image. */
    char *pszOldDescName;
    /** Flag whether we called vmdkFreeImage(). */
    bool fImageFreed;
    /** Flag whether the descriptor is embedded in the image (sparse) or
     * in a separate file. */
    bool fEmbeddedDesc;
    /** Number of extents in the image. */
    unsigned cExtents;
    /** New base filename. */
    char *pszNewBaseName;
    /** The old base filename. */
    char *pszOldBaseName;
    /** New full filename. */
    char *pszNewFullName;
    /** Old full filename. */
    char *pszOldFullName;
    /** The old image name. */
    const char *pszOldImageName;
    /** Copy of the original VMDK descriptor. */
    VMDKDESCRIPTOR DescriptorCopy;
    /** Copy of the extent state for sparse images. */
    VMDKEXTENT ExtentCopy;
} VMDKRENAMESTATE;
/** Pointer to a VMDK rename state. */
typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
495
496
497/*********************************************************************************************************************************
498* Static Variables *
499*********************************************************************************************************************************/
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID} /* terminator */
};
/** NULL-terminated array of configuration options. */
static const VDCONFIGINFO s_aVmdkConfigInfo[] =
{
    /* Options for VMDK raw disks */
    { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
    { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
    { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
    /* End of options list */
    { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
};
517
518
519/*********************************************************************************************************************************
520* Internal Functions *
521*********************************************************************************************************************************/
522static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
523static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
524 bool fDelete);
525static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
526static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
527static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
528static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
529static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
530 void *pvUser, int rcReq);
531/**
532 * Internal: open a file (using a file descriptor cache to ensure each file
533 * is only opened once - anything else can cause locking problems).
534 */
535static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
536 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
537{
538 int rc = VINF_SUCCESS;
539 PVMDKFILE pVmdkFile;
540 for (pVmdkFile = pImage->pFiles;
541 pVmdkFile != NULL;
542 pVmdkFile = pVmdkFile->pNext)
543 {
544 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
545 {
546 Assert(fOpen == pVmdkFile->fOpen);
547 pVmdkFile->uReferences++;
548 *ppVmdkFile = pVmdkFile;
549 return rc;
550 }
551 }
552 /* If we get here, there's no matching entry in the cache. */
553 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
554 if (!pVmdkFile)
555 {
556 *ppVmdkFile = NULL;
557 return VERR_NO_MEMORY;
558 }
559 pVmdkFile->pszFilename = RTStrDup(pszFilename);
560 if (!pVmdkFile->pszFilename)
561 {
562 RTMemFree(pVmdkFile);
563 *ppVmdkFile = NULL;
564 return VERR_NO_MEMORY;
565 }
566 if (pszBasename)
567 {
568 pVmdkFile->pszBasename = RTStrDup(pszBasename);
569 if (!pVmdkFile->pszBasename)
570 {
571 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
572 RTMemFree(pVmdkFile);
573 *ppVmdkFile = NULL;
574 return VERR_NO_MEMORY;
575 }
576 }
577 pVmdkFile->fOpen = fOpen;
578 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
579 &pVmdkFile->pStorage);
580 if (RT_SUCCESS(rc))
581 {
582 pVmdkFile->uReferences = 1;
583 pVmdkFile->pImage = pImage;
584 pVmdkFile->pNext = pImage->pFiles;
585 if (pImage->pFiles)
586 pImage->pFiles->pPrev = pVmdkFile;
587 pImage->pFiles = pVmdkFile;
588 *ppVmdkFile = pVmdkFile;
589 }
590 else
591 {
592 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
593 RTMemFree(pVmdkFile);
594 *ppVmdkFile = NULL;
595 }
596 return rc;
597}
598/**
599 * Internal: close a file, updating the file descriptor cache.
600 */
601static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
602{
603 int rc = VINF_SUCCESS;
604 PVMDKFILE pVmdkFile = *ppVmdkFile;
605 AssertPtr(pVmdkFile);
606 pVmdkFile->fDelete |= fDelete;
607 Assert(pVmdkFile->uReferences);
608 pVmdkFile->uReferences--;
609 if (pVmdkFile->uReferences == 0)
610 {
611 PVMDKFILE pPrev;
612 PVMDKFILE pNext;
613 /* Unchain the element from the list. */
614 pPrev = pVmdkFile->pPrev;
615 pNext = pVmdkFile->pNext;
616 if (pNext)
617 pNext->pPrev = pPrev;
618 if (pPrev)
619 pPrev->pNext = pNext;
620 else
621 pImage->pFiles = pNext;
622 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
623 bool fFileDel = pVmdkFile->fDelete;
624 if ( pVmdkFile->pszBasename
625 && fFileDel)
626 {
627 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
628 if ( RTPathHasPath(pVmdkFile->pszBasename)
629 || !pszSuffix
630 || ( strcmp(pszSuffix, ".vmdk")
631 && strcmp(pszSuffix, ".bin")
632 && strcmp(pszSuffix, ".img")))
633 fFileDel = false;
634 }
635 if (fFileDel)
636 {
637 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
638 if (RT_SUCCESS(rc))
639 rc = rc2;
640 }
641 else if (pVmdkFile->fDelete)
642 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
643 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
644 if (pVmdkFile->pszBasename)
645 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
646 RTMemFree(pVmdkFile);
647 }
648 *ppVmdkFile = NULL;
649 return rc;
650}
651/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
652#ifndef VMDK_USE_BLOCK_DECOMP_API
653static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
654{
655 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
656 size_t cbInjected = 0;
657 Assert(cbBuf);
658 if (pInflateState->iOffset < 0)
659 {
660 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
661 pvBuf = (uint8_t *)pvBuf + 1;
662 cbBuf--;
663 cbInjected = 1;
664 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
665 }
666 if (!cbBuf)
667 {
668 if (pcbBuf)
669 *pcbBuf = cbInjected;
670 return VINF_SUCCESS;
671 }
672 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
673 memcpy(pvBuf,
674 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
675 cbBuf);
676 pInflateState->iOffset += cbBuf;
677 Assert(pcbBuf);
678 *pcbBuf = cbBuf + cbInjected;
679 return VINF_SUCCESS;
680}
681#endif
682/**
683 * Internal: read from a file and inflate the compressed data,
684 * distinguishing between async and normal operation
685 */
686DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
687 uint64_t uOffset, void *pvBuf,
688 size_t cbToRead, const void *pcvMarker,
689 uint64_t *puLBA, uint32_t *pcbMarkerData)
690{
691 int rc;
692#ifndef VMDK_USE_BLOCK_DECOMP_API
693 PRTZIPDECOMP pZip = NULL;
694#endif
695 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
696 size_t cbCompSize, cbActuallyRead;
697 if (!pcvMarker)
698 {
699 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
700 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
701 if (RT_FAILURE(rc))
702 return rc;
703 }
704 else
705 {
706 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
707 /* pcvMarker endianness has already been partially transformed, fix it */
708 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
709 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
710 }
711 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
712 if (cbCompSize == 0)
713 {
714 AssertMsgFailed(("VMDK: corrupted marker\n"));
715 return VERR_VD_VMDK_INVALID_FORMAT;
716 }
717 /* Sanity check - the expansion ratio should be much less than 2. */
718 Assert(cbCompSize < 2 * cbToRead);
719 if (cbCompSize >= 2 * cbToRead)
720 return VERR_VD_VMDK_INVALID_FORMAT;
721 /* Compressed grain marker. Data follows immediately. */
722 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
723 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
724 (uint8_t *)pExtent->pvCompGrain
725 + RT_UOFFSETOF(VMDKMARKER, uType),
726 RT_ALIGN_Z( cbCompSize
727 + RT_UOFFSETOF(VMDKMARKER, uType),
728 512)
729 - RT_UOFFSETOF(VMDKMARKER, uType));
730 if (puLBA)
731 *puLBA = RT_LE2H_U64(pMarker->uSector);
732 if (pcbMarkerData)
733 *pcbMarkerData = RT_ALIGN( cbCompSize
734 + RT_UOFFSETOF(VMDKMARKER, uType),
735 512);
736#ifdef VMDK_USE_BLOCK_DECOMP_API
737 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
738 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
739 pvBuf, cbToRead, &cbActuallyRead);
740#else
741 VMDKCOMPRESSIO InflateState;
742 InflateState.pImage = pImage;
743 InflateState.iOffset = -1;
744 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
745 InflateState.pvCompGrain = pExtent->pvCompGrain;
746 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
747 if (RT_FAILURE(rc))
748 return rc;
749 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
750 RTZipDecompDestroy(pZip);
751#endif /* !VMDK_USE_BLOCK_DECOMP_API */
752 if (RT_FAILURE(rc))
753 {
754 if (rc == VERR_ZIP_CORRUPTED)
755 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
756 return rc;
757 }
758 if (cbActuallyRead != cbToRead)
759 rc = VERR_VD_VMDK_INVALID_FORMAT;
760 return rc;
761}
762static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
763{
764 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
765 Assert(cbBuf);
766 if (pDeflateState->iOffset < 0)
767 {
768 pvBuf = (const uint8_t *)pvBuf + 1;
769 cbBuf--;
770 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
771 }
772 if (!cbBuf)
773 return VINF_SUCCESS;
774 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
775 return VERR_BUFFER_OVERFLOW;
776 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
777 pvBuf, cbBuf);
778 pDeflateState->iOffset += cbBuf;
779 return VINF_SUCCESS;
780}
781/**
782 * Internal: deflate the uncompressed data and write to a file,
783 * distinguishing between async and normal operation
784 */
785DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
786 uint64_t uOffset, const void *pvBuf,
787 size_t cbToWrite, uint64_t uLBA,
788 uint32_t *pcbMarkerData)
789{
790 int rc;
791 PRTZIPCOMP pZip = NULL;
792 VMDKCOMPRESSIO DeflateState;
793 DeflateState.pImage = pImage;
794 DeflateState.iOffset = -1;
795 DeflateState.cbCompGrain = pExtent->cbCompGrain;
796 DeflateState.pvCompGrain = pExtent->pvCompGrain;
797 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
798 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
799 if (RT_FAILURE(rc))
800 return rc;
801 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
802 if (RT_SUCCESS(rc))
803 rc = RTZipCompFinish(pZip);
804 RTZipCompDestroy(pZip);
805 if (RT_SUCCESS(rc))
806 {
807 Assert( DeflateState.iOffset > 0
808 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
809 /* pad with zeroes to get to a full sector size */
810 uint32_t uSize = DeflateState.iOffset;
811 if (uSize % 512)
812 {
813 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
814 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
815 uSizeAlign - uSize);
816 uSize = uSizeAlign;
817 }
818 if (pcbMarkerData)
819 *pcbMarkerData = uSize;
820 /* Compressed grain marker. Data follows immediately. */
821 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
822 pMarker->uSector = RT_H2LE_U64(uLBA);
823 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
824 - RT_UOFFSETOF(VMDKMARKER, uType));
825 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
826 uOffset, pMarker, uSize);
827 if (RT_FAILURE(rc))
828 return rc;
829 }
830 return rc;
831}
832/**
833 * Internal: check if all files are closed, prevent leaking resources.
834 */
835static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
836{
837 int rc = VINF_SUCCESS, rc2;
838 PVMDKFILE pVmdkFile;
839 Assert(pImage->pFiles == NULL);
840 for (pVmdkFile = pImage->pFiles;
841 pVmdkFile != NULL;
842 pVmdkFile = pVmdkFile->pNext)
843 {
844 LogRel(("VMDK: leaking reference to file \"%s\"\n",
845 pVmdkFile->pszFilename));
846 pImage->pFiles = pVmdkFile->pNext;
847 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
848 if (RT_SUCCESS(rc))
849 rc = rc2;
850 }
851 return rc;
852}
853/**
854 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
855 * critical non-ASCII characters.
856 */
857static char *vmdkEncodeString(const char *psz)
858{
859 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
860 char *pszDst = szEnc;
861 AssertPtr(psz);
862 for (; *psz; psz = RTStrNextCp(psz))
863 {
864 char *pszDstPrev = pszDst;
865 RTUNICP Cp = RTStrGetCp(psz);
866 if (Cp == '\\')
867 {
868 pszDst = RTStrPutCp(pszDst, Cp);
869 pszDst = RTStrPutCp(pszDst, Cp);
870 }
871 else if (Cp == '\n')
872 {
873 pszDst = RTStrPutCp(pszDst, '\\');
874 pszDst = RTStrPutCp(pszDst, 'n');
875 }
876 else if (Cp == '\r')
877 {
878 pszDst = RTStrPutCp(pszDst, '\\');
879 pszDst = RTStrPutCp(pszDst, 'r');
880 }
881 else
882 pszDst = RTStrPutCp(pszDst, Cp);
883 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
884 {
885 pszDst = pszDstPrev;
886 break;
887 }
888 }
889 *pszDst = '\0';
890 return RTStrDup(szEnc);
891}
892/**
893 * Internal: decode a string and store it into the specified string.
894 */
895static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
896{
897 int rc = VINF_SUCCESS;
898 char szBuf[4];
899 if (!cb)
900 return VERR_BUFFER_OVERFLOW;
901 AssertPtr(psz);
902 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
903 {
904 char *pszDst = szBuf;
905 RTUNICP Cp = RTStrGetCp(pszEncoded);
906 if (Cp == '\\')
907 {
908 pszEncoded = RTStrNextCp(pszEncoded);
909 RTUNICP CpQ = RTStrGetCp(pszEncoded);
910 if (CpQ == 'n')
911 RTStrPutCp(pszDst, '\n');
912 else if (CpQ == 'r')
913 RTStrPutCp(pszDst, '\r');
914 else if (CpQ == '\0')
915 {
916 rc = VERR_VD_VMDK_INVALID_HEADER;
917 break;
918 }
919 else
920 RTStrPutCp(pszDst, CpQ);
921 }
922 else
923 pszDst = RTStrPutCp(pszDst, Cp);
924 /* Need to leave space for terminating NUL. */
925 if ((size_t)(pszDst - szBuf) + 1 >= cb)
926 {
927 rc = VERR_BUFFER_OVERFLOW;
928 break;
929 }
930 memcpy(psz, szBuf, pszDst - szBuf);
931 psz += pszDst - szBuf;
932 }
933 *psz = '\0';
934 return rc;
935}
936/**
937 * Internal: free all buffers associated with grain directories.
938 */
939static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
940{
941 if (pExtent->pGD)
942 {
943 RTMemFree(pExtent->pGD);
944 pExtent->pGD = NULL;
945 }
946 if (pExtent->pRGD)
947 {
948 RTMemFree(pExtent->pRGD);
949 pExtent->pRGD = NULL;
950 }
951}
952/**
953 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
954 * images.
955 */
956static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
957{
958 int rc = VINF_SUCCESS;
959 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
960 {
961 /* streamOptimized extents need a compressed grain buffer, which must
962 * be big enough to hold uncompressible data (which needs ~8 bytes
963 * more than the uncompressed data), the marker and padding. */
964 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
965 + 8 + sizeof(VMDKMARKER), 512);
966 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
967 if (RT_LIKELY(pExtent->pvCompGrain))
968 {
969 /* streamOptimized extents need a decompressed grain buffer. */
970 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
971 if (!pExtent->pvGrain)
972 rc = VERR_NO_MEMORY;
973 }
974 else
975 rc = VERR_NO_MEMORY;
976 }
977 if (RT_FAILURE(rc))
978 vmdkFreeStreamBuffers(pExtent);
979 return rc;
980}
981/**
982 * Internal: allocate all buffers associated with grain directories.
983 */
984static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
985{
986 RT_NOREF1(pImage);
987 int rc = VINF_SUCCESS;
988 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
989 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
990 if (RT_LIKELY(pExtent->pGD))
991 {
992 if (pExtent->uSectorRGD)
993 {
994 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
995 if (RT_UNLIKELY(!pExtent->pRGD))
996 rc = VERR_NO_MEMORY;
997 }
998 }
999 else
1000 rc = VERR_NO_MEMORY;
1001 if (RT_FAILURE(rc))
1002 vmdkFreeGrainDirectory(pExtent);
1003 return rc;
1004}
1005/**
1006 * Converts the grain directory from little to host endianess.
1007 *
1008 * @returns nothing.
1009 * @param pGD The grain directory.
1010 * @param cGDEntries Number of entries in the grain directory to convert.
1011 */
1012DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1013{
1014 uint32_t *pGDTmp = pGD;
1015 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1016 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1017}
/**
 * Read the grain directory and allocated grain tables verifying them against
 * their back up copies if available.
 *
 * @returns VBox status code.
 * @param   pImage    Image instance data.
 * @param   pExtent   The VMDK extent.
 */
static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
    size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
    /* Only hosted sparse extents with in-place (not appended) grain
     * directories are valid input here; anything else is a caller bug. */
    AssertReturn((   pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
                  && pExtent->uSectorGD != VMDK_GD_AT_END
                  && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
    rc = vmdkAllocGrainDirectory(pImage, pExtent);
    if (RT_SUCCESS(rc))
    {
        /* The VMDK 1.1 spec seems to talk about compressed grain directories,
         * but in reality they are not compressed. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(pExtent->uSectorGD),
                                   pExtent->pGD, cbGD);
        if (RT_SUCCESS(rc))
        {
            vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
            /* Cross-check against the redundant copy unless the caller asked
             * to skip consistency checks. */
            if (   pExtent->uSectorRGD
                && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
            {
                /* The VMDK 1.1 spec seems to talk about compressed grain directories,
                 * but in reality they are not compressed. */
                rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
                                           pExtent->pRGD, cbGD);
                if (RT_SUCCESS(rc))
                {
                    vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
                    /* Check grain table and redundant grain table for consistency. */
                    size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
                    size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
                    size_t cbGTBuffersMax = _1M;
                    uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
                    uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
                    /* NOTE(review): on allocation failure rc is set but the
                     * loop below is still entered; it then iterates without
                     * doing any I/O. Harmless but wasteful - consider an
                     * early bail-out. */
                    if (   !pTmpGT1
                        || !pTmpGT2)
                        rc = VERR_NO_MEMORY;
                    size_t i = 0;
                    uint32_t *pGDTmp = pExtent->pGD;
                    uint32_t *pRGDTmp = pExtent->pRGD;
                    /* Loop through all entries. */
                    while (i < pExtent->cGDEntries)
                    {
                        uint32_t uGTStart = *pGDTmp;
                        uint32_t uRGTStart = *pRGDTmp;
                        size_t cbGTRead = cbGT;
                        /* If no grain table is allocated skip the entry. */
                        if (*pGDTmp == 0 && *pRGDTmp == 0)
                        {
                            /* NOTE(review): i advances here but pGDTmp/pRGDTmp
                             * do not, so index and pointers desynchronize and
                             * the remaining entries are effectively not
                             * validated. Looks like a bug - verify against
                             * upstream before changing. */
                            i++;
                            continue;
                        }
                        if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                        {
                            /* Just one grain directory entry refers to a not yet allocated
                             * grain table or both grain directory copies refer to the same
                             * grain table. Not allowed. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                            break;
                        }
                        i++;
                        pGDTmp++;
                        pRGDTmp++;
                        /*
                         * Read a few tables at once if adjacent to decrease the number
                         * of I/O requests. Read at maximum 1MB at once.
                         */
                        while (   i < pExtent->cGDEntries
                               && cbGTRead < cbGTBuffersMax)
                        {
                            /* If no grain table is allocated skip the entry. */
                            if (*pGDTmp == 0 && *pRGDTmp == 0)
                            {
                                /* NOTE(review): same index/pointer desync as in
                                 * the outer loop skip above. */
                                i++;
                                continue;
                            }
                            if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
                            {
                                /* Just one grain directory entry refers to a not yet allocated
                                 * grain table or both grain directory copies refer to the same
                                 * grain table. Not allowed. */
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* Check that the start offsets are adjacent.*/
                            if (   VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
                                || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
                                break;
                            i++;
                            pGDTmp++;
                            pRGDTmp++;
                            cbGTRead += cbGT;
                        }
                        /* Increase buffers if required. */
                        if (   RT_SUCCESS(rc)
                            && cbGTBuffers < cbGTRead)
                        {
                            uint32_t *pTmp;
                            pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
                            if (pTmp)
                            {
                                pTmpGT1 = pTmp;
                                pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
                                if (pTmp)
                                    pTmpGT2 = pTmp;
                                else
                                    rc = VERR_NO_MEMORY;
                            }
                            else
                                rc = VERR_NO_MEMORY;
                            if (rc == VERR_NO_MEMORY)
                            {
                                /* Reset to the old values. */
                                rc = VINF_SUCCESS;
                                /* NOTE(review): only i is rewound here, not
                                 * pGDTmp/pRGDTmp, so the next outer iteration
                                 * re-checks different entries than the rewound
                                 * index suggests - verify. */
                                i -= cbGTRead / cbGT;
                                cbGTRead = cbGT;
                                /* Don't try to increase the buffer again in the next run. */
                                cbGTBuffersMax = cbGTBuffers;
                            }
                        }
                        if (RT_SUCCESS(rc))
                        {
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uGTStart),
                                                       pTmpGT1, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* The VMDK 1.1 spec seems to talk about compressed grain tables,
                             * but in reality they are not compressed. */
                            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                       VMDK_SECTOR2BYTE(uRGTStart),
                                                       pTmpGT2, cbGTRead);
                            if (RT_FAILURE(rc))
                            {
                                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                               N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                            /* Primary and redundant table must match bit for bit. */
                            if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
                            {
                                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                               N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
                                break;
                            }
                        }
                    } /* while (i < pExtent->cGDEntries) */
                    /** @todo figure out what to do for unclean VMDKs. */
                    if (pTmpGT1)
                        RTMemFree(pTmpGT1);
                    if (pTmpGT2)
                        RTMemFree(pTmpGT2);
                }
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
            }
        }
        else
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                           N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
    }
    if (RT_FAILURE(rc))
        vmdkFreeGrainDirectory(pExtent);
    return rc;
}
1200/**
1201 * Creates a new grain directory for the given extent at the given start sector.
1202 *
1203 * @returns VBox status code.
1204 * @param pImage Image instance data.
1205 * @param pExtent The VMDK extent.
1206 * @param uStartSector Where the grain directory should be stored in the image.
1207 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1208 */
1209static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1210 uint64_t uStartSector, bool fPreAlloc)
1211{
1212 int rc = VINF_SUCCESS;
1213 unsigned i;
1214 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1215 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1216 size_t cbGTRounded;
1217 uint64_t cbOverhead;
1218 if (fPreAlloc)
1219 {
1220 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1221 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1222 }
1223 else
1224 {
1225 /* Use a dummy start sector for layout computation. */
1226 if (uStartSector == VMDK_GD_AT_END)
1227 uStartSector = 1;
1228 cbGTRounded = 0;
1229 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1230 }
1231 /* For streamOptimized extents there is only one grain directory,
1232 * and for all others take redundant grain directory into account. */
1233 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1234 {
1235 cbOverhead = RT_ALIGN_64(cbOverhead,
1236 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1237 }
1238 else
1239 {
1240 cbOverhead += cbGDRounded + cbGTRounded;
1241 cbOverhead = RT_ALIGN_64(cbOverhead,
1242 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1243 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1244 }
1245 if (RT_SUCCESS(rc))
1246 {
1247 pExtent->uAppendPosition = cbOverhead;
1248 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1249 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1250 {
1251 pExtent->uSectorRGD = 0;
1252 pExtent->uSectorGD = uStartSector;
1253 }
1254 else
1255 {
1256 pExtent->uSectorRGD = uStartSector;
1257 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1258 }
1259 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1260 if (RT_SUCCESS(rc))
1261 {
1262 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1263 if ( RT_SUCCESS(rc)
1264 && fPreAlloc)
1265 {
1266 uint32_t uGTSectorLE;
1267 uint64_t uOffsetSectors;
1268 if (pExtent->pRGD)
1269 {
1270 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1271 for (i = 0; i < pExtent->cGDEntries; i++)
1272 {
1273 pExtent->pRGD[i] = uOffsetSectors;
1274 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1275 /* Write the redundant grain directory entry to disk. */
1276 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1277 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1278 &uGTSectorLE, sizeof(uGTSectorLE));
1279 if (RT_FAILURE(rc))
1280 {
1281 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1282 break;
1283 }
1284 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1285 }
1286 }
1287 if (RT_SUCCESS(rc))
1288 {
1289 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1290 for (i = 0; i < pExtent->cGDEntries; i++)
1291 {
1292 pExtent->pGD[i] = uOffsetSectors;
1293 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1294 /* Write the grain directory entry to disk. */
1295 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1296 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1297 &uGTSectorLE, sizeof(uGTSectorLE));
1298 if (RT_FAILURE(rc))
1299 {
1300 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1301 break;
1302 }
1303 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1304 }
1305 }
1306 }
1307 }
1308 }
1309 if (RT_FAILURE(rc))
1310 vmdkFreeGrainDirectory(pExtent);
1311 return rc;
1312}
1313/**
1314 * Unquotes the given string returning the result in a separate buffer.
1315 *
1316 * @returns VBox status code.
1317 * @param pImage The VMDK image state.
1318 * @param pszStr The string to unquote.
1319 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1320 * free.
1321 * @param ppszNext Where to store the pointer to any character following
1322 * the quoted value, optional.
1323 */
1324static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1325 char **ppszUnquoted, char **ppszNext)
1326{
1327 const char *pszStart = pszStr;
1328 char *pszQ;
1329 char *pszUnquoted;
1330 /* Skip over whitespace. */
1331 while (*pszStr == ' ' || *pszStr == '\t')
1332 pszStr++;
1333 if (*pszStr != '"')
1334 {
1335 pszQ = (char *)pszStr;
1336 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1337 pszQ++;
1338 }
1339 else
1340 {
1341 pszStr++;
1342 pszQ = (char *)strchr(pszStr, '"');
1343 if (pszQ == NULL)
1344 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1345 pImage->pszFilename, pszStart);
1346 }
1347 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1348 if (!pszUnquoted)
1349 return VERR_NO_MEMORY;
1350 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1351 pszUnquoted[pszQ - pszStr] = '\0';
1352 *ppszUnquoted = pszUnquoted;
1353 if (ppszNext)
1354 *ppszNext = pszQ + 1;
1355 return VINF_SUCCESS;
1356}
1357static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1358 const char *pszLine)
1359{
1360 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1361 ssize_t cbDiff = strlen(pszLine) + 1;
1362 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1363 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1364 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1365 memcpy(pEnd, pszLine, cbDiff);
1366 pDescriptor->cLines++;
1367 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1368 pDescriptor->fDirty = true;
1369 return VINF_SUCCESS;
1370}
1371static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1372 const char *pszKey, const char **ppszValue)
1373{
1374 size_t cbKey = strlen(pszKey);
1375 const char *pszValue;
1376 while (uStart != 0)
1377 {
1378 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1379 {
1380 /* Key matches, check for a '=' (preceded by whitespace). */
1381 pszValue = pDescriptor->aLines[uStart] + cbKey;
1382 while (*pszValue == ' ' || *pszValue == '\t')
1383 pszValue++;
1384 if (*pszValue == '=')
1385 {
1386 *ppszValue = pszValue + 1;
1387 break;
1388 }
1389 }
1390 uStart = pDescriptor->aNextLines[uStart];
1391 }
1392 return !!uStart;
1393}
/**
 * Sets, replaces or removes a "key=value" line in the given descriptor
 * section, keeping the shared line buffer and the aLines/aNextLines
 * bookkeeping arrays consistent.
 *
 * All descriptor lines live back-to-back in one buffer; each section forms a
 * singly linked list through aNextLines starting at uStart.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the buffer or the line table is full.
 * @param   pImage       The VMDK image state (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       Index of the first line of the section to search.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to remove the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;
    /* Search the section chain for an existing "key = value" line. On a hit
     * pszTmp is left pointing at the start of the current value. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
                 * bloat and potentially out of space error? */
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart; /* remember the section's last line for appending */
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (  pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
                > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            /* Shift the tail of the buffer to fit the new value, then copy
             * the new value in (including its terminator). */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            /* All following line pointers moved by the size difference. */
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists and shall be removed: close the gap in the buffer
             * and shift the bookkeeping arrays down by one line. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0; /* end of chain */
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1; /* "key=value\0" */
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (  pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Shift the bookkeeping arrays up to open slot uLast + 1. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        /* Link the new line at the end of this section's chain. */
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        pszTmp = pDescriptor->aLines[uStart];
        /* Make room in the buffer and write "key=value" plus terminator. */
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;
        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1506static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1507 uint32_t *puValue)
1508{
1509 const char *pszValue;
1510 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1511 &pszValue))
1512 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1513 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1514}
1515/**
1516 * Returns the value of the given key as a string allocating the necessary memory.
1517 *
1518 * @returns VBox status code.
1519 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1520 * @param pImage The VMDK image state.
1521 * @param pDescriptor The descriptor to fetch the value from.
1522 * @param pszKey The key to get the value from.
1523 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1524 * free.
1525 */
1526static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1527 const char *pszKey, char **ppszValue)
1528{
1529 const char *pszValue;
1530 char *pszValueUnquoted;
1531 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1532 &pszValue))
1533 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1534 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1535 if (RT_FAILURE(rc))
1536 return rc;
1537 *ppszValue = pszValueUnquoted;
1538 return rc;
1539}
1540static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1541 const char *pszKey, const char *pszValue)
1542{
1543 char *pszValueQuoted;
1544 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1545 if (!pszValueQuoted)
1546 return VERR_NO_STR_MEMORY;
1547 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1548 pszValueQuoted);
1549 RTStrFree(pszValueQuoted);
1550 return rc;
1551}
/**
 * Removes the first line of the extent description section (the dummy entry
 * created when a fresh descriptor is initialized), closing the gap in the
 * shared line buffer and fixing up the line bookkeeping.
 *
 * @param   pImage       The VMDK image state (unused).
 * @param   pDescriptor  The descriptor to modify.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    RT_NOREF1(pImage);
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;
    /* Nothing to do if there is no extent section. */
    if (!uEntry)
        return;
    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Shift line pointers (by the removed size) and the next-line chain
     * down by one entry. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0; /* end of chain */
    }
    pDescriptor->cLines--;
    /* The DDB section moved up by one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;
    return;
}
/**
 * Appends a new extent description line of the form
 * "ACCESS SIZE TYPE [\"basename\" [offset]]" to the extent section.
 *
 * @returns VBox status code.
 * @retval  VERR_BUFFER_OVERFLOW if the descriptor buffer or line table is full.
 * @param   pImage           The VMDK image state (for error reporting).
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      Extent file name (not used for ZERO extents).
 * @param   uSectorOffset    Start offset within the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;
    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));
    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    /* Format the new line; ZERO extents carry no file name, FLAT extents
     * additionally carry the start offset within the file. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;
    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (  pDescriptor->aLines[pDescriptor->cLines]
            - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
    /* Shift the line bookkeeping arrays up to open slot uLast + 1. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    /* Link the new line at the end of the extent chain. */
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    pszTmp = pDescriptor->aLines[uStart];
    /* Make room in the shared buffer and copy the formatted line in. */
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;
    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1644/**
1645 * Returns the value of the given key from the DDB as a string allocating
1646 * the necessary memory.
1647 *
1648 * @returns VBox status code.
1649 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1650 * @param pImage The VMDK image state.
1651 * @param pDescriptor The descriptor to fetch the value from.
1652 * @param pszKey The key to get the value from.
1653 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1654 * free.
1655 */
1656static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1657 const char *pszKey, char **ppszValue)
1658{
1659 const char *pszValue;
1660 char *pszValueUnquoted;
1661 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1662 &pszValue))
1663 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1664 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1665 if (RT_FAILURE(rc))
1666 return rc;
1667 *ppszValue = pszValueUnquoted;
1668 return rc;
1669}
1670static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1671 const char *pszKey, uint32_t *puValue)
1672{
1673 const char *pszValue;
1674 char *pszValueUnquoted;
1675 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1676 &pszValue))
1677 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1678 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1679 if (RT_FAILURE(rc))
1680 return rc;
1681 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1682 RTMemTmpFree(pszValueUnquoted);
1683 return rc;
1684}
1685static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1686 const char *pszKey, PRTUUID pUuid)
1687{
1688 const char *pszValue;
1689 char *pszValueUnquoted;
1690 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1691 &pszValue))
1692 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1693 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1694 if (RT_FAILURE(rc))
1695 return rc;
1696 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1697 RTMemTmpFree(pszValueUnquoted);
1698 return rc;
1699}
1700static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1701 const char *pszKey, const char *pszVal)
1702{
1703 int rc;
1704 char *pszValQuoted;
1705 if (pszVal)
1706 {
1707 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1708 if (!pszValQuoted)
1709 return VERR_NO_STR_MEMORY;
1710 }
1711 else
1712 pszValQuoted = NULL;
1713 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1714 pszValQuoted);
1715 if (pszValQuoted)
1716 RTStrFree(pszValQuoted);
1717 return rc;
1718}
1719static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1720 const char *pszKey, PCRTUUID pUuid)
1721{
1722 char *pszUuid;
1723 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1724 if (!pszUuid)
1725 return VERR_NO_STR_MEMORY;
1726 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1727 pszUuid);
1728 RTStrFree(pszUuid);
1729 return rc;
1730}
1731static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1732 const char *pszKey, uint32_t uValue)
1733{
1734 char *pszValue;
1735 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1736 if (!pszValue)
1737 return VERR_NO_STR_MEMORY;
1738 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1739 pszValue);
1740 RTStrFree(pszValue);
1741 return rc;
1742}
1743/**
1744 * Splits the descriptor data into individual lines checking for correct line
1745 * endings and descriptor size.
1746 *
1747 * @returns VBox status code.
1748 * @param pImage The image instance.
1749 * @param pDesc The descriptor.
1750 * @param pszTmp The raw descriptor data from the image.
1751 */
1752static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1753{
1754 unsigned cLine = 0;
1755 int rc = VINF_SUCCESS;
1756 while ( RT_SUCCESS(rc)
1757 && *pszTmp != '\0')
1758 {
1759 pDesc->aLines[cLine++] = pszTmp;
1760 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1761 {
1762 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1763 rc = VERR_VD_VMDK_INVALID_HEADER;
1764 break;
1765 }
1766 while (*pszTmp != '\0' && *pszTmp != '\n')
1767 {
1768 if (*pszTmp == '\r')
1769 {
1770 if (*(pszTmp + 1) != '\n')
1771 {
1772 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1773 break;
1774 }
1775 else
1776 {
1777 /* Get rid of CR character. */
1778 *pszTmp = '\0';
1779 }
1780 }
1781 pszTmp++;
1782 }
1783 if (RT_FAILURE(rc))
1784 break;
1785 /* Get rid of LF character. */
1786 if (*pszTmp == '\n')
1787 {
1788 *pszTmp = '\0';
1789 pszTmp++;
1790 }
1791 }
1792 if (RT_SUCCESS(rc))
1793 {
1794 pDesc->cLines = cLine;
1795 /* Pointer right after the end of the used part of the buffer. */
1796 pDesc->aLines[cLine] = pszTmp;
1797 }
1798 return rc;
1799}
/**
 * Internal: split the raw descriptor into lines and classify them, recording
 * the first line of the header section (uFirstDesc), the extent section
 * (uFirstExtent) and the disk database section (uFirstDDB), and chaining the
 * non-empty, non-comment lines of each section via aNextLines.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance data (used for error reporting).
 * @param   pDescData   Raw descriptor text; modified in place by line splitting.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 * @param   pDescriptor Descriptor structure to fill.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
{
    pDescriptor->cbDescAlloc = cbDescData;
    int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
    if (RT_SUCCESS(rc))
    {
        /* The very first line must be one of the known header comment forms. */
        if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
            &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
            &&  strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        else
        {
            unsigned uLastNonEmptyLine = 0;
            /* Initialize those, because we need to be able to reopen an image. */
            pDescriptor->uFirstDesc = 0;
            pDescriptor->uFirstExtent = 0;
            pDescriptor->uFirstDDB = 0;
            for (unsigned i = 0; i < pDescriptor->cLines; i++)
            {
                /* Comments and empty lines are not classified or chained. */
                if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
                {
                    if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                        ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                        ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
                    {
                        /* An extent descriptor. Must appear after the header
                         * section and before the disk database section. */
                        if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstExtent)
                        {
                            pDescriptor->uFirstExtent = i;
                            /* New section starts: restart the per-section chain. */
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
                    {
                        /* A disk database entry. Must come last, after both the
                         * header and the extent sections. */
                        if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDDB)
                        {
                            pDescriptor->uFirstDDB = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    else
                    {
                        /* A normal entry. These belong to the header section and
                         * therefore must precede extent and DDB entries. */
                        if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                        {
                            /* Incorrect ordering of entries. */
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                            break;
                        }
                        if (!pDescriptor->uFirstDesc)
                        {
                            pDescriptor->uFirstDesc = i;
                            uLastNonEmptyLine = 0;
                        }
                    }
                    /* Chain this line to the previous non-empty line of the
                     * same section (0 means "no predecessor yet"). */
                    if (uLastNonEmptyLine)
                        pDescriptor->aNextLines[uLastNonEmptyLine] = i;
                    uLastNonEmptyLine = i;
                }
            }
        }
    }
    return rc;
}
1883static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1884 PCVDGEOMETRY pPCHSGeometry)
1885{
1886 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1887 VMDK_DDB_GEO_PCHS_CYLINDERS,
1888 pPCHSGeometry->cCylinders);
1889 if (RT_FAILURE(rc))
1890 return rc;
1891 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1892 VMDK_DDB_GEO_PCHS_HEADS,
1893 pPCHSGeometry->cHeads);
1894 if (RT_FAILURE(rc))
1895 return rc;
1896 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1897 VMDK_DDB_GEO_PCHS_SECTORS,
1898 pPCHSGeometry->cSectors);
1899 return rc;
1900}
1901static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1902 PCVDGEOMETRY pLCHSGeometry)
1903{
1904 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1905 VMDK_DDB_GEO_LCHS_CYLINDERS,
1906 pLCHSGeometry->cCylinders);
1907 if (RT_FAILURE(rc))
1908 return rc;
1909 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1910 VMDK_DDB_GEO_LCHS_HEADS,
1911 pLCHSGeometry->cHeads);
1912 if (RT_FAILURE(rc))
1913 return rc;
1914 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1915 VMDK_DDB_GEO_LCHS_SECTORS,
1916 pLCHSGeometry->cSectors);
1917 return rc;
1918}
1919static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1920 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1921{
1922 pDescriptor->uFirstDesc = 0;
1923 pDescriptor->uFirstExtent = 0;
1924 pDescriptor->uFirstDDB = 0;
1925 pDescriptor->cLines = 0;
1926 pDescriptor->cbDescAlloc = cbDescData;
1927 pDescriptor->fDirty = false;
1928 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1929 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1930 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1931 if (RT_SUCCESS(rc))
1932 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1933 if (RT_SUCCESS(rc))
1934 {
1935 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1936 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1937 }
1938 if (RT_SUCCESS(rc))
1939 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1940 if (RT_SUCCESS(rc))
1941 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1942 if (RT_SUCCESS(rc))
1943 {
1944 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
1945 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1946 }
1947 if (RT_SUCCESS(rc))
1948 {
1949 /* The trailing space is created by VMware, too. */
1950 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
1951 }
1952 if (RT_SUCCESS(rc))
1953 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
1954 if (RT_SUCCESS(rc))
1955 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1956 if (RT_SUCCESS(rc))
1957 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
1958 if (RT_SUCCESS(rc))
1959 {
1960 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
1961 /* Now that the framework is in place, use the normal functions to insert
1962 * the remaining keys. */
1963 char szBuf[9];
1964 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
1965 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
1966 "CID", szBuf);
1967 }
1968 if (RT_SUCCESS(rc))
1969 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
1970 "parentCID", "ffffffff");
1971 if (RT_SUCCESS(rc))
1972 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
1973 return rc;
1974}
/**
 * Internal: parse the descriptor of an opened VMDK image: version check,
 * createType-to-image-flags mapping, extent table, PCHS/LCHS geometry and the
 * various UUIDs. Missing UUIDs are generated and written back to the
 * descriptor for images opened read/write (typically images created by VMware
 * and never before used by VirtualBox).
 *
 * @returns VBox status code.
 * @param   pImage      Image instance data.
 * @param   pDescData   Raw descriptor text; modified in place while parsing.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;
    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;
    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
    /* Get image creation type and determine image flags. Unknown createType
     * values simply leave the image flags untouched. */
    char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (    !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        ||  !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    RTMemTmpFree(pszCreateType);
    /* Count the number of extent config entries by walking the aNextLines
     * chain built by vmdkPreprocessDescriptor. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;
    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }
    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }
    /* Parse every extent line: "<ACCESS> <size> <TYPE> [\"basename\" [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];
        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        /* Type of the extent. */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }
    /* Determine PCHS geometry (autogenerate if necessary). A key that is
     * simply absent is tolerated; a malformed value is an error. */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject geometries outside the classic 16-head/63-sector limits. */
    if (    pImage->PCHSGeometry.cCylinders == 0
        ||  pImage->PCHSGeometry.cHeads == 0
        ||  pImage->PCHSGeometry.cHeads > 16
        ||  pImage->PCHSGeometry.cSectors == 0
        ||  pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }
    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing: any missing component invalidates it. */
    if (    pImage->LCHSGeometry.cCylinders == 0
        ||  pImage->LCHSGeometry.cHeads == 0
        ||  pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }
    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;
    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;
    /* Get UUID of parent image. Note that, unlike the image UUID above, a
     * missing parent UUID is stored as the nil UUID, not a fresh one. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;
    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;
    return VINF_SUCCESS;
}
2276/**
2277 * Internal : Prepares the descriptor to write to the image.
2278 */
2279static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2280 void **ppvData, size_t *pcbData)
2281{
2282 int rc = VINF_SUCCESS;
2283 /*
2284 * Allocate temporary descriptor buffer.
2285 * In case there is no limit allocate a default
2286 * and increase if required.
2287 */
2288 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2289 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2290 size_t offDescriptor = 0;
2291 if (!pszDescriptor)
2292 return VERR_NO_MEMORY;
2293 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2294 {
2295 const char *psz = pImage->Descriptor.aLines[i];
2296 size_t cb = strlen(psz);
2297 /*
2298 * Increase the descriptor if there is no limit and
2299 * there is not enough room left for this line.
2300 */
2301 if (offDescriptor + cb + 1 > cbDescriptor)
2302 {
2303 if (cbLimit)
2304 {
2305 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2306 break;
2307 }
2308 else
2309 {
2310 char *pszDescriptorNew = NULL;
2311 LogFlow(("Increasing descriptor cache\n"));
2312 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2313 if (!pszDescriptorNew)
2314 {
2315 rc = VERR_NO_MEMORY;
2316 break;
2317 }
2318 pszDescriptor = pszDescriptorNew;
2319 cbDescriptor += cb + 4 * _1K;
2320 }
2321 }
2322 if (cb > 0)
2323 {
2324 memcpy(pszDescriptor + offDescriptor, psz, cb);
2325 offDescriptor += cb;
2326 }
2327 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2328 offDescriptor++;
2329 }
2330 if (RT_SUCCESS(rc))
2331 {
2332 *ppvData = pszDescriptor;
2333 *pcbData = offDescriptor;
2334 }
2335 else if (pszDescriptor)
2336 RTMemFree(pszDescriptor);
2337 return rc;
2338}
2339/**
2340 * Internal: write/update the descriptor part of the image.
2341 */
2342static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2343{
2344 int rc = VINF_SUCCESS;
2345 uint64_t cbLimit;
2346 uint64_t uOffset;
2347 PVMDKFILE pDescFile;
2348 void *pvDescriptor = NULL;
2349 size_t cbDescriptor;
2350 if (pImage->pDescData)
2351 {
2352 /* Separate descriptor file. */
2353 uOffset = 0;
2354 cbLimit = 0;
2355 pDescFile = pImage->pFile;
2356 }
2357 else
2358 {
2359 /* Embedded descriptor file. */
2360 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2361 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2362 pDescFile = pImage->pExtents[0].pFile;
2363 }
2364 /* Bail out if there is no file to write to. */
2365 if (pDescFile == NULL)
2366 return VERR_INVALID_PARAMETER;
2367 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2368 if (RT_SUCCESS(rc))
2369 {
2370 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2371 uOffset, pvDescriptor,
2372 cbLimit ? cbLimit : cbDescriptor,
2373 pIoCtx, NULL, NULL);
2374 if ( RT_FAILURE(rc)
2375 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2376 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2377 }
2378 if (RT_SUCCESS(rc) && !cbLimit)
2379 {
2380 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2381 if (RT_FAILURE(rc))
2382 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2383 }
2384 if (RT_SUCCESS(rc))
2385 pImage->Descriptor.fDirty = false;
2386 if (pvDescriptor)
2387 RTMemFree(pvDescriptor);
2388 return rc;
2389}
2390/**
2391 * Internal: validate the consistency check values in a binary header.
2392 */
2393static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2394{
2395 int rc = VINF_SUCCESS;
2396 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2397 {
2398 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2399 return rc;
2400 }
2401 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2402 {
2403 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2404 return rc;
2405 }
2406 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2407 && ( pHeader->singleEndLineChar != '\n'
2408 || pHeader->nonEndLineChar != ' '
2409 || pHeader->doubleEndLineChar1 != '\r'
2410 || pHeader->doubleEndLineChar2 != '\n') )
2411 {
2412 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2413 return rc;
2414 }
2415 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2416 {
2417 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2418 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2419 return rc;
2420 }
2421 return rc;
2422}
/**
 * Internal: read metadata belonging to an extent with binary header, i.e.
 * as found in monolithic files.
 *
 * @returns VBox status code; on failure the extent data is freed.
 * @param   pImage            Image instance data.
 * @param   pExtent           The extent to read the header for.
 * @param   fMagicAlreadyRead Whether the 32-bit magic has already been
 *                            consumed by the caller (the header is then read
 *                            starting at the version field).
 */
static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    bool fMagicAlreadyRead)
{
    SparseExtentHeader Header;
    int rc;
    if (!fMagicAlreadyRead)
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
                                   &Header, sizeof(Header));
    else
    {
        /* Caller consumed the magic; reconstruct it and read the rest. */
        Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   RT_UOFFSETOF(SparseExtentHeader, version),
                                   &Header.version,
                                   sizeof(Header)
                                 - RT_UOFFSETOF(SparseExtentHeader, version));
    }
    if (RT_SUCCESS(rc))
    {
        rc = vmdkValidateHeader(pImage, pExtent, &Header);
        if (RT_SUCCESS(rc))
        {
            uint64_t cbFile = 0;
            /* Flag bit 17 plus gdOffset == VMDK_GD_AT_END marks a
             * stream-optimized extent carrying a footer at the file end. */
            if (    (RT_LE2H_U32(Header.flags) & RT_BIT(17))
                &&  RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
                pExtent->fFooter = true;
            /* The file size is needed for writable images (append position)
             * and for locating the footer in random-access read mode. */
            if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                || (   pExtent->fFooter
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
            {
                rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
            }
            if (RT_SUCCESS(rc))
            {
                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
                if (    pExtent->fFooter
                    &&  (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                         || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
                {
                    /* Read the footer, which comes before the end-of-stream marker. */
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               cbFile - 2*512, &Header,
                                               sizeof(Header));
                    if (RT_FAILURE(rc))
                    {
                        vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
                        rc = VERR_VD_VMDK_INVALID_HEADER;
                    }
                    if (RT_SUCCESS(rc))
                        rc = vmdkValidateHeader(pImage, pExtent, &Header);
                    /* Prohibit any writes to this extent. */
                    pExtent->uAppendPosition = 0;
                }
                if (RT_SUCCESS(rc))
                {
                    /* Copy the (possibly footer-sourced) header fields into
                     * the extent, converting from little endian. */
                    pExtent->uVersion = RT_LE2H_U32(Header.version);
                    pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
                    pExtent->cSectors = RT_LE2H_U64(Header.capacity);
                    pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
                    pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
                    pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
                    pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
                    pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
                    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
                    pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
                    /* Flag bit 1: a redundant grain directory is present; the
                     * rgdOffset field is only meaningful in that case. */
                    if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
                    {
                        pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
                        pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
                    }
                    else
                    {
                        pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
                        pExtent->uSectorRGD = 0;
                    }
                    /* A descriptor offset without a size is inconsistent. */
                    if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
                        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                       N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
                    /* GD-at-end is only resolvable when streaming sequentially
                     * read-only; reject it for any other access mode. */
                    if (    RT_SUCCESS(rc)
                        &&  (   pExtent->uSectorGD == VMDK_GD_AT_END
                             || pExtent->uSectorRGD == VMDK_GD_AT_END)
                        &&  (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                             || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
                        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                       N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
                    if (RT_SUCCESS(rc))
                    {
                        uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
                        if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
                            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                                           N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
                        else
                        {
                            pExtent->cSectorsPerGDE = cSectorsPerGDE;
                            pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
                            /* Fix up the number of descriptor sectors, as some flat images have
                             * really just one, and this causes failures when inserting the UUID
                             * values and other extra information. */
                            if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
                            {
                                /* Do it the easy way - just fix it for flat images which have no
                                 * other complicated metadata which needs space too. */
                                if (    pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
                                    &&  pExtent->cGTEntries * pExtent->cGDEntries == 0)
                                    pExtent->cDescriptorSectors = 4;
                            }
                        }
                    }
                }
            }
        }
    }
    else
    {
        vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
        rc = VERR_VD_VMDK_INVALID_HEADER;
    }
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);
    return rc;
}
/**
 * Internal: read additional metadata belonging to an extent. For those
 * extents which have no additional metadata just verify the information.
 *
 * @returns VBox status code; on failure the extent data is freed.
 * @param   pImage   Image instance data.
 * @param   pExtent  The extent to validate and finish initializing.
 */
static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    int rc = VINF_SUCCESS;
/* disabled the check as there are too many truncated vmdk images out there */
#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
    uint64_t cbExtentSize;
    /* The image must be a multiple of a sector in size and contain the data
     * area (flat images only). If not, it means the image is at least
     * truncated, or even seriously garbled. */
    rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
    if (RT_FAILURE(rc))
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
    else if (    cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
             &&  (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                       N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
    if (    RT_SUCCESS(rc)
        &&  pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
    {
        /* The spec says that this must be a power of two and greater than 8,
         * but probably they meant not less than 8. */
        if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
            ||  pExtent->cSectorsPerGrain < 8)
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                           N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
        else
        {
            /* This code requires that a grain table must hold a power of two multiple
             * of the number of entries per GT cache entry. */
            if (    (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
                ||  pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
                               N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
            else
            {
                rc = vmdkAllocStreamBuffers(pImage, pExtent);
                if (RT_SUCCESS(rc))
                {
                    /* Prohibit any writes to this streamOptimized extent. */
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        pExtent->uAppendPosition = 0;
                    /* Read the grain directory now, except for the purely
                     * sequential, read-only streamOptimized case, which reads
                     * the grains as the stream is consumed. */
                    if (    !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        ||  !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        ||  !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = vmdkReadGrainDirectory(pImage, pExtent);
                    else
                    {
                        pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
                        pExtent->cbGrainStreamRead = 0;
                    }
                }
            }
        }
    }
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);
    return rc;
}
2614/**
2615 * Internal: write/update the metadata for a sparse extent.
2616 */
2617static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2618 uint64_t uOffset, PVDIOCTX pIoCtx)
2619{
2620 SparseExtentHeader Header;
2621 memset(&Header, '\0', sizeof(Header));
2622 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2623 Header.version = RT_H2LE_U32(pExtent->uVersion);
2624 Header.flags = RT_H2LE_U32(RT_BIT(0));
2625 if (pExtent->pRGD)
2626 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2627 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2628 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2629 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2630 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2631 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2632 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2633 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2634 if (pExtent->fFooter && uOffset == 0)
2635 {
2636 if (pExtent->pRGD)
2637 {
2638 Assert(pExtent->uSectorRGD);
2639 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2640 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2641 }
2642 else
2643 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2644 }
2645 else
2646 {
2647 if (pExtent->pRGD)
2648 {
2649 Assert(pExtent->uSectorRGD);
2650 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2651 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2652 }
2653 else
2654 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2655 }
2656 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2657 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2658 Header.singleEndLineChar = '\n';
2659 Header.nonEndLineChar = ' ';
2660 Header.doubleEndLineChar1 = '\r';
2661 Header.doubleEndLineChar2 = '\n';
2662 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2663 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2664 uOffset, &Header, sizeof(Header),
2665 pIoCtx, NULL, NULL);
2666 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2667 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2668 return rc;
2669}
2670/**
2671 * Internal: free the buffers used for streamOptimized images.
2672 */
2673static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2674{
2675 if (pExtent->pvCompGrain)
2676 {
2677 RTMemFree(pExtent->pvCompGrain);
2678 pExtent->pvCompGrain = NULL;
2679 }
2680 if (pExtent->pvGrain)
2681 {
2682 RTMemFree(pExtent->pvGrain);
2683 pExtent->pvGrain = NULL;
2684 }
2685}
2686/**
2687 * Internal: free the memory used by the extent data structure, optionally
2688 * deleting the referenced files.
2689 *
2690 * @returns VBox status code.
2691 * @param pImage Pointer to the image instance data.
2692 * @param pExtent The extent to free.
2693 * @param fDelete Flag whether to delete the backing storage.
2694 */
2695static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2696 bool fDelete)
2697{
2698 int rc = VINF_SUCCESS;
2699 vmdkFreeGrainDirectory(pExtent);
2700 if (pExtent->pDescData)
2701 {
2702 RTMemFree(pExtent->pDescData);
2703 pExtent->pDescData = NULL;
2704 }
2705 if (pExtent->pFile != NULL)
2706 {
2707 /* Do not delete raw extents, these have full and base names equal. */
2708 rc = vmdkFileClose(pImage, &pExtent->pFile,
2709 fDelete
2710 && pExtent->pszFullname
2711 && pExtent->pszBasename
2712 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2713 }
2714 if (pExtent->pszBasename)
2715 {
2716 RTMemTmpFree((void *)pExtent->pszBasename);
2717 pExtent->pszBasename = NULL;
2718 }
2719 if (pExtent->pszFullname)
2720 {
2721 RTStrFree((char *)(void *)pExtent->pszFullname);
2722 pExtent->pszFullname = NULL;
2723 }
2724 vmdkFreeStreamBuffers(pExtent);
2725 return rc;
2726}
2727/**
2728 * Internal: allocate grain table cache if necessary for this image.
2729 */
2730static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2731{
2732 PVMDKEXTENT pExtent;
2733 /* Allocate grain table cache if any sparse extent is present. */
2734 for (unsigned i = 0; i < pImage->cExtents; i++)
2735 {
2736 pExtent = &pImage->pExtents[i];
2737 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2738 {
2739 /* Allocate grain table cache. */
2740 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2741 if (!pImage->pGTCache)
2742 return VERR_NO_MEMORY;
2743 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2744 {
2745 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2746 pGCE->uExtent = UINT32_MAX;
2747 }
2748 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2749 break;
2750 }
2751 }
2752 return VINF_SUCCESS;
2753}
2754/**
2755 * Internal: allocate the given number of extents.
2756 */
2757static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2758{
2759 int rc = VINF_SUCCESS;
2760 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2761 if (pExtents)
2762 {
2763 for (unsigned i = 0; i < cExtents; i++)
2764 {
2765 pExtents[i].pFile = NULL;
2766 pExtents[i].pszBasename = NULL;
2767 pExtents[i].pszFullname = NULL;
2768 pExtents[i].pGD = NULL;
2769 pExtents[i].pRGD = NULL;
2770 pExtents[i].pDescData = NULL;
2771 pExtents[i].uVersion = 1;
2772 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2773 pExtents[i].uExtent = i;
2774 pExtents[i].pImage = pImage;
2775 }
2776 pImage->pExtents = pExtents;
2777 pImage->cExtents = cExtents;
2778 }
2779 else
2780 rc = VERR_NO_MEMORY;
2781 return rc;
2782}
/**
 * Reads and processes the descriptor embedded in sparse images.
 *
 * Creates the single extent, reads the binary sparse header, optionally
 * enlarges an unusually small embedded descriptor, then reads and parses
 * the descriptor text and the extent metadata.
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pFile The sparse file handle (ownership passes to the extent on success).
 */
static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* It's a hosted single-extent image. */
    int rc = vmdkCreateExtents(pImage, 1);
    if (RT_SUCCESS(rc))
    {
        /* The opened file is passed to the extent. No separate descriptor
         * file, so no need to keep anything open for the image. */
        PVMDKEXTENT pExtent = &pImage->pExtents[0];
        pExtent->pFile = pFile;
        pImage->pFile = NULL;
        pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
        if (RT_LIKELY(pExtent->pszFullname))
        {
            /* As we're dealing with a monolithic image here, there must
             * be a descriptor embedded in the image file. */
            rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
            if (   RT_SUCCESS(rc)
                && pExtent->uDescriptorSector
                && pExtent->cDescriptorSectors)
            {
                /* HACK: extend the descriptor if it is unusually small and it fits in
                 * the unused space after the image header. Allows opening VMDK files
                 * with extremely small descriptor in read/write mode.
                 *
                 * The previous version introduced a possible regression for VMDK stream
                 * optimized images from VMware which tend to have only a single sector sized
                 * descriptor. Increasing the descriptor size resulted in adding the various uuid
                 * entries required to make it work with VBox but for stream optimized images
                 * the updated binary header wasn't written to the disk creating a mismatch
                 * between advertised and real descriptor size.
                 *
                 * The descriptor size will be increased even if opened readonly now if there
                 * enough room but the new value will not be written back to the image.
                 */
                if (   pExtent->cDescriptorSectors < 3
                    && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
                    && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
                {
                    /* Remember the advertised size so it can be restored if the
                     * on-disk update below fails. */
                    uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
                    pExtent->cDescriptorSectors = 4;
                    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    {
                        /*
                         * Update the on disk number now to make sure we don't introduce inconsistencies
                         * in case of stream optimized images from VMware where the descriptor is just
                         * one sector big (the binary header is not written to disk for complete
                         * stream optimized images in vmdkFlushImage()).
                         */
                        uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
                        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
                                                    RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
                                                    &u64DescSizeNew, sizeof(u64DescSizeNew));
                        if (RT_FAILURE(rc))
                        {
                            LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
                            /* Restore the old size and carry on. */
                            pExtent->cDescriptorSectors = cDescriptorSectorsOld;
                        }
                    }
                }
                /* Read the descriptor from the extent. */
                pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                if (RT_LIKELY(pExtent->pDescData))
                {
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
                                               pExtent->pDescData,
                                               VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
                                                 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
                        /* Grain directory/tables are skipped for streamOptimized
                         * images opened for async I/O; they are read on demand. */
                        if (   RT_SUCCESS(rc)
                            && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
                        {
                            rc = vmdkReadMetaExtent(pImage, pExtent);
                            if (RT_SUCCESS(rc))
                            {
                                /* Mark the extent as unclean if opened in read-write mode. */
                                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                            }
                        }
                        else if (RT_SUCCESS(rc))
                            rc = VERR_NOT_SUPPORTED;
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            else if (RT_SUCCESS(rc))
                rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
/**
 * Reads the descriptor from a pure text file.
 *
 * Sizes and reads the descriptor buffer, parses it, resolves each extent's
 * full path and opens the extent files according to their type.
 *
 * @returns VBox status code.
 * @param pImage VMDK image instance.
 * @param pFile The descriptor file handle.
 */
static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
{
    /* Allocate at least 10K, and make sure that there is 5K free space
     * in case new entries need to be added to the descriptor. Never
     * allocate more than 128K, because that's no valid descriptor file
     * and will result in the correct "truncated read" error handling. */
    uint64_t cbFileSize;
    int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
    if (   RT_SUCCESS(rc)
        && cbFileSize >= 50)
    {
        /* Round the allocation up to the next 5K boundary plus 5K headroom
         * (VMDK_SECTOR2BYTE(10) == 5120 bytes), capped at 128K. */
        uint64_t cbSize = cbFileSize;
        if (cbSize % VMDK_SECTOR2BYTE(10))
            cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
        else
            cbSize += VMDK_SECTOR2BYTE(10);
        cbSize = RT_MIN(cbSize, _128K);
        pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
        pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
        if (RT_LIKELY(pImage->pDescData))
        {
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
                                       RT_MIN(pImage->cbDescAlloc, cbFileSize));
            if (RT_SUCCESS(rc))
            {
#if 0 /** @todo Revisit */
                cbRead += sizeof(u32Magic);
                if (cbRead == pImage->cbDescAlloc)
                {
                    /* Likely the read is truncated. Better fail a bit too early
                     * (normally the descriptor is much smaller than our buffer). */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
#endif
                rc = vmdkParseDescriptor(pImage, pImage->pDescData,
                                         pImage->cbDescAlloc);
                if (RT_SUCCESS(rc))
                {
                    /* Resolve paths and open the backing file of every extent
                     * listed in the descriptor; stop on the first failure. */
                    for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
                    {
                        PVMDKEXTENT pExtent = &pImage->pExtents[i];
                        if (pExtent->pszBasename)
                        {
                            /* Hack to figure out whether the specified name in the
                             * extent descriptor is absolute. Doesn't always work, but
                             * should be good enough for now. */
                            char *pszFullname;
                            /** @todo implement proper path absolute check. */
                            if (pExtent->pszBasename[0] == RTPATH_SLASH)
                            {
                                pszFullname = RTStrDup(pExtent->pszBasename);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                            }
                            else
                            {
                                /* Relative name: resolve against the directory
                                 * containing the descriptor file. */
                                char *pszDirname = RTStrDup(pImage->pszFilename);
                                if (!pszDirname)
                                {
                                    rc = VERR_NO_MEMORY;
                                    break;
                                }
                                RTPathStripFilename(pszDirname);
                                pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
                                RTStrFree(pszDirname);
                                if (!pszFullname)
                                {
                                    rc = VERR_NO_STR_MEMORY;
                                    break;
                                }
                            }
                            pExtent->pszFullname = pszFullname;
                        }
                        else
                            pExtent->pszFullname = NULL;
                        /* Force read-only access for extents marked read-only in
                         * the descriptor, regardless of the image open flags. */
                        unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
                        switch (pExtent->enmType)
                        {
                            case VMDKETYPE_HOSTED_SPARSE:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
                                                              false /* fMagicAlreadyRead */);
                                if (RT_FAILURE(rc))
                                    break;
                                rc = vmdkReadMetaExtent(pImage, pExtent);
                                if (RT_FAILURE(rc))
                                    break;
                                /* Mark extent as unclean if opened in read-write mode. */
                                if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                                {
                                    pExtent->fUncleanShutdown = true;
                                    pExtent->fMetaDirty = true;
                                }
                                break;
                            case VMDKETYPE_VMFS:
                            case VMDKETYPE_FLAT:
                                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
                                                  VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
                                if (RT_FAILURE(rc))
                                {
                                    /* Do NOT signal an appropriate error here, as the VD
                                     * layer has the choice of retrying the open if it
                                     * failed. */
                                    break;
                                }
                                break;
                            case VMDKETYPE_ZERO:
                                /* Nothing to do. */
                                break;
                            default:
                                AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
                        }
                    }
                }
            }
            else
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else if (RT_SUCCESS(rc))
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
    return rc;
}
3040/**
3041 * Read and process the descriptor based on the image type.
3042 *
3043 * @returns VBox status code.
3044 * @param pImage VMDK image instance.
3045 * @param pFile VMDK file handle.
3046 */
3047static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3048{
3049 uint32_t u32Magic;
3050 /* Read magic (if present). */
3051 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3052 &u32Magic, sizeof(u32Magic));
3053 if (RT_SUCCESS(rc))
3054 {
3055 /* Handle the file according to its magic number. */
3056 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3057 rc = vmdkDescriptorReadSparse(pImage, pFile);
3058 else
3059 rc = vmdkDescriptorReadAscii(pImage, pFile);
3060 }
3061 else
3062 {
3063 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3064 rc = VERR_VD_VMDK_INVALID_HEADER;
3065 }
3066 return rc;
3067}
/**
 * Internal: Open an image, constructing all necessary data structures.
 *
 * Opens the file, reads/parses the descriptor, derives PCHS geometry and
 * per-image constants from the extents, and fills in the region list.
 * On failure the partially constructed image is torn down again.
 */
static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
{
    pImage->uOpenFlags = uOpenFlags;
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
    /*
     * Open the image.
     * We don't have to check for asynchronous access because
     * we only support raw access and the opened file is a description
     * file were no data is stored.
     */
    PVMDKFILE pFile;
    int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
                          VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
    if (RT_SUCCESS(rc))
    {
        pImage->pFile = pFile;
        rc = vmdkDescriptorRead(pImage, pFile);
        if (RT_SUCCESS(rc))
        {
            /* Determine PCHS geometry if not set. */
            if (pImage->PCHSGeometry.cCylinders == 0)
            {
                /* NOTE(review): this divides by cHeads and cSectors; presumably
                 * descriptor parsing guarantees these are non-zero whenever
                 * cCylinders is 0 — verify against vmdkParseDescriptor. */
                uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
                                    / pImage->PCHSGeometry.cHeads
                                    / pImage->PCHSGeometry.cSectors;
                /* 16383 is the traditional ATA/BIOS cylinder limit. */
                pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
                if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                {
                    rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
                    AssertRC(rc);
                }
            }
            /* Update the image metadata now in case it has changed. */
            rc = vmdkFlushImage(pImage, NULL);
            if (RT_SUCCESS(rc))
            {
                /* Figure out a few per-image constants from the extents. */
                pImage->cbSize = 0;
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    PVMDKEXTENT pExtent = &pImage->pExtents[i];
                    if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
                    {
                        /* Here used to be a check whether the nominal size of an extent
                         * is a multiple of the grain size. The spec says that this is
                         * always the case, but unfortunately some files out there in the
                         * wild violate the spec (e.g. ReactOS 0.3.1). */
                    }
                    else if (   pExtent->enmType == VMDKETYPE_FLAT
                             || pExtent->enmType == VMDKETYPE_ZERO)
                        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
                    pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
                }
                /* The grain table cache is not needed for sequential read-only
                 * access to streamOptimized images. */
                if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                    rc = vmdkAllocateGrainTableCache(pImage);
            }
        }
    }
    /* else: Do NOT signal an appropriate error here, as the VD layer has the
     * choice of retrying the open if it failed. */
    if (RT_SUCCESS(rc))
    {
        /* Publish the disk as a single raw 512-byte-sector region. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags = 0;
        pImage->RegionList.cRegions = 1;
        pRegion->offRegion = 0; /* Disk start. */
        pRegion->cbBlock = 512;
        pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData = 512;
        pRegion->cbMetadata = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;
    }
    else
        vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
    return rc;
}
3153/**
3154 * Frees a raw descriptor.
3155 * @internal
3156 */
3157static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3158{
3159 if (!pRawDesc)
3160 return VINF_SUCCESS;
3161 RTStrFree(pRawDesc->pszRawDisk);
3162 pRawDesc->pszRawDisk = NULL;
3163 /* Partitions: */
3164 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3165 {
3166 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3167 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3168 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3169 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3170 }
3171 RTMemFree(pRawDesc->pPartDescs);
3172 pRawDesc->pPartDescs = NULL;
3173 RTMemFree(pRawDesc);
3174 return VINF_SUCCESS;
3175}
3176/**
3177 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3178 * returning the pointer to the first new entry.
3179 * @internal
3180 */
3181static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3182{
3183 uint32_t const cOld = pRawDesc->cPartDescs;
3184 uint32_t const cNew = cOld + cToAdd;
3185 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3186 cOld * sizeof(pRawDesc->pPartDescs[0]),
3187 cNew * sizeof(pRawDesc->pPartDescs[0]));
3188 if (paNew)
3189 {
3190 pRawDesc->cPartDescs = cNew;
3191 pRawDesc->pPartDescs = paNew;
3192 *ppRet = &paNew[cOld];
3193 return VINF_SUCCESS;
3194 }
3195 *ppRet = NULL;
3196 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3197 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3198 pImage->pszFilename, cOld, cNew);
3199}
3200/**
3201 * @callback_method_impl{FNRTSORTCMP}
3202 */
3203static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3204{
3205 RT_NOREF(pvUser);
3206 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3207 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3208}
/**
 * Post processes the partition descriptors.
 *
 * Sorts them and checks that they don't overlap, wrap around, or extend
 * beyond the end of the drive.
 */
static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
{
    /*
     * Sort data areas in ascending order of start.
     */
    RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
    /*
     * Check that we don't have overlapping descriptors. If we do, that's an
     * indication that the drive is corrupt or that the RTDvm code is buggy.
     */
    VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
    for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
    {
        /* offLast starts out as the exclusive end offset; <= start catches
         * both zero-sized descriptors and 64-bit wrap-around. */
        uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
        if (offLast <= paPartDescs[i].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "");
        /* Convert to the inclusive last byte for the range checks below. */
        offLast -= 1;
        if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
                             paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
        if (offLast >= cbSize)
            return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
                             N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
                             pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
                             paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
    }
    return VINF_SUCCESS;
}
3248#ifdef RT_OS_LINUX
/**
 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
 * 'dev' file matching @a uDevToLocate.
 *
 * NOTE(review): the original comment here read "This is used both" and was
 * cut short; presumably it is used for whole disks and for partitions —
 * confirm against the callers.
 *
 * @returns IPRT status code, errors have been reported properly.
 * @param pImage For error reporting.
 * @param pszBlockDevDir Input: Path to the directory search under.
 * Output: Path to the directory containing information
 * for @a uDevToLocate.
 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
 * @param uDevToLocate The device number of the block device info dir to
 * locate.
 * @param pszDevToLocate For error reporting.
 */
static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
                                   dev_t uDevToLocate, const char *pszDevToLocate)
{
    size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
    AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
    RTDIR hDir = NIL_RTDIR;
    int rc = RTDirOpen(&hDir, pszBlockDevDir);
    if (RT_SUCCESS(rc))
    {
        for (;;)
        {
            RTDIRENTRY Entry;
            rc = RTDirRead(hDir, &Entry, NULL);
            if (RT_SUCCESS(rc))
            {
                /* We're interested in directories and symlinks. */
                if (   Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
                    || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
                    || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
                {
                    /* Append the entry name and probe its 'dev' file; on a
                     * match, exit the loop leaving the appended name in the
                     * buffer (this is the function's success output). */
                    rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
                    AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
                    /* Seed with a value guaranteed not to equal the target so
                     * a failed read cannot produce a false match. */
                    dev_t uThisDevNo = ~uDevToLocate;
                    rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
                    if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
                        break;
                }
            }
            else
            {
                /* Enumeration ended (or failed): restore the buffer to the
                 * bare directory path before reporting. */
                pszBlockDevDir[cchDir] = '\0';
                if (rc == VERR_NO_MORE_FILES)
                    rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
                                   pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
                else
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                                   N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
                                   pImage->pszFilename, pszBlockDevDir, rc);
                break;
            }
        }
        RTDirClose(hDir);
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
                       N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
                       pImage->pszFilename, pszBlockDevDir, rc);
    return rc;
}
3315#endif /* RT_OS_LINUX */
3316#ifdef RT_OS_FREEBSD
3317/**
3318 * Reads the config data from the provider and returns offset and size
3319 *
3320 * @return IPRT status code
3321 * @param pProvider GEOM provider representing partition
3322 * @param pcbOffset Placeholder for the offset of the partition
3323 * @param pcbSize Placeholder for the size of the partition
3324 */
3325static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3326{
3327 gconfig *pConfEntry;
3328 int rc = VERR_NOT_FOUND;
3329 /*
3330 * Required parameters are located in the list containing key/value pairs.
3331 * Both key and value are in text form. Manuals tells nothing about the fact
3332 * that the both parameters should be present in the list. Thus, there are
3333 * cases when only one parameter is presented. To handle such cases we treat
3334 * absent params as zero allowing the caller decide the case is either correct
3335 * or an error.
3336 */
3337 uint64_t cbOffset = 0;
3338 uint64_t cbSize = 0;
3339 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3340 {
3341 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3342 {
3343 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3344 rc = VINF_SUCCESS;
3345 }
3346 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3347 {
3348 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3349 rc = VINF_SUCCESS;
3350 }
3351 }
3352 if (RT_SUCCESS(rc))
3353 {
3354 *pcbOffset = cbOffset;
3355 *pcbSize = cbSize;
3356 }
3357 return rc;
3358}
/**
 * Searches the partition specified by name and calculates its size and absolute offset.
 *
 * @return IPRT status code.
 * @param pParentClass Class containing pParentGeom
 * @param pszParentGeomName Name of the parent geom where we are looking for provider
 * @param pszProviderName Name of the provider we are looking for
 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
 * @param pcbSize Placeholder for the size of the partition.
 */
static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
                                         uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
{
    AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
    AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
    AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
    AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
    AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
    /* Locate the parent geom by name within the class. */
    ggeom *pParentGeom;
    int rc = VERR_NOT_FOUND;
    LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
    {
        if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
        {
            rc = VINF_SUCCESS;
            break;
        }
    }
    if (RT_FAILURE(rc))
        return rc;
    gprovider *pProvider;
    /*
     * First, go over the providers without handling EBR or BSDLabel
     * partitions, covering the common case where the provider we are looking
     * for is a direct child of the given geom.  This keeps the search cheap.
     */
    LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
    {
        if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
            return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
    }
    /*
     * No direct match found.  Go over the parent geom's providers again and
     * recurse, in case a provider represents an EBR or BSDLabel container:
     * such a container holds only the EBR/BSDLabel partition itself, and its
     * sub-partitions live in separate geoms.  Partition offsets there are
     * relative to the containing geom, so the child provider's offset must be
     * added to this provider's own offset to get an absolute value.
     */
    LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
    {
        uint64_t cbOffset = 0;
        uint64_t cbSize = 0;
        rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
        if (RT_FAILURE(rc))
            return rc;
        uint64_t cbProviderOffset = 0;
        uint64_t cbProviderSize = 0;
        /* Recurse with this provider's geom as the new parent. */
        rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
        if (RT_SUCCESS(rc))
        {
            *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
            *pcbSize = cbProviderSize;
            return rc;
        }
    }
    return VERR_NOT_FOUND;
}
3428#endif
3429/**
3430 * Attempts to verify the raw partition path.
3431 *
3432 * We don't want to trust RTDvm and the partition device node morphing blindly.
3433 */
3434static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3435 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3436{
3437 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3438 /*
3439 * Try open the raw partition device.
3440 */
3441 RTFILE hRawPart = NIL_RTFILE;
3442 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3443 if (RT_FAILURE(rc))
3444 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3445 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3446 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3447 /*
3448 * Compare the partition UUID if we can get it.
3449 */
3450#ifdef RT_OS_WINDOWS
3451 DWORD cbReturned;
3452 /* 1. Get the device numbers for both handles, they should have the same disk. */
3453 STORAGE_DEVICE_NUMBER DevNum1;
3454 RT_ZERO(DevNum1);
3455 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3456 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3457 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3458 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3459 pImage->pszFilename, pszRawDrive, GetLastError());
3460 STORAGE_DEVICE_NUMBER DevNum2;
3461 RT_ZERO(DevNum2);
3462 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3463 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3464 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3465 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3466 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3467 if ( RT_SUCCESS(rc)
3468 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3469 || DevNum1.DeviceType != DevNum2.DeviceType))
3470 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3471 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3472 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3473 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3474 if (RT_SUCCESS(rc))
3475 {
3476 /* Get the partitions from the raw drive and match up with the volume info
3477 from RTDvm. The partition number is found in DevNum2. */
3478 DWORD cbNeeded = 0;
3479 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3480 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3481 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3482 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3483 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3484 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3485 if (pLayout)
3486 {
3487 cbReturned = 0;
3488 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3489 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3490 {
3491 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3492 unsigned iEntry = 0;
3493 while ( iEntry < pLayout->PartitionCount
3494 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3495 iEntry++;
3496 if (iEntry < pLayout->PartitionCount)
3497 {
3498 /* Compare the basics */
3499 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3500 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3501 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3502 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3503 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3504 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3505 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3506 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3507 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3508 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3509 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3510 /** @todo We could compare the MBR type, GPT type and ID. */
3511 RT_NOREF(hVol);
3512 }
3513 else
3514 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3515 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3516 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3517 DevNum2.PartitionNumber, pLayout->PartitionCount);
3518# ifndef LOG_ENABLED
3519 if (RT_FAILURE(rc))
3520# endif
3521 {
3522 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3523 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3524 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3525 {
3526 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3527 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3528 pEntry->PartitionStyle, pEntry->RewritePartition));
3529 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3530 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3531 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3532 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3533 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3534 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3535 else
3536 LogRel(("\n"));
3537 }
3538 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3539 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3540 }
3541 }
3542 else
3543 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3544 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3545 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3546 RTMemTmpFree(pLayout);
3547 }
3548 else
3549 rc = VERR_NO_TMP_MEMORY;
3550 }
3551#elif defined(RT_OS_LINUX)
3552 RT_NOREF(hVol);
3553 /* Stat the two devices first to get their device numbers. (We probably
3554 could make some assumptions here about the major & minor number assignments
3555 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
3556 struct stat StDrive, StPart;
3557 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3558 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3559 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3560 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
3561 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3562 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
3563 else
3564 {
3565 /* Scan the directories immediately under /sys/block/ for one with a
3566 'dev' file matching the drive's device number: */
3567 char szSysPath[RTPATH_MAX];
3568 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
3569 AssertRCReturn(rc, rc); /* this shall not fail */
3570 if (RTDirExists(szSysPath))
3571 {
3572 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
3573 /* Now, scan the directories under that again for a partition device
3574 matching the hRawPart device's number: */
3575 if (RT_SUCCESS(rc))
3576 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
3577 /* Having found the /sys/block/device/partition/ path, we can finally
3578 read the partition attributes and compare with hVol. */
3579 if (RT_SUCCESS(rc))
3580 {
3581 /* partition number: */
3582 int64_t iLnxPartition = 0;
3583 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
3584 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
3585 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3586 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
3587 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
3588 /* else: ignore failure? */
3589 /* start offset: */
3590 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
3591 if (RT_SUCCESS(rc))
3592 {
3593 int64_t offLnxStart = -1;
3594 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
3595 offLnxStart *= cbLnxSector;
3596 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
3597 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3598 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3599 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
3600 /* else: ignore failure? */
3601 }
3602 /* the size: */
3603 if (RT_SUCCESS(rc))
3604 {
3605 int64_t cbLnxData = -1;
3606 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
3607 cbLnxData *= cbLnxSector;
3608 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
3609 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3610 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3611 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
3612 /* else: ignore failure? */
3613 }
3614 }
3615 }
3616 /* else: We've got nothing to work on, so only do content comparison. */
3617 }
3618#elif defined(RT_OS_FREEBSD)
3619 char szDriveDevName[256];
3620 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
3621 if (pszDevName == NULL)
3622 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3623 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
3624 char szPartDevName[256];
3625 if (RT_SUCCESS(rc))
3626 {
3627 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
3628 if (pszDevName == NULL)
3629 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3630 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
3631 }
3632 if (RT_SUCCESS(rc))
3633 {
3634 gmesh geomMesh;
3635 int err = geom_gettree(&geomMesh);
3636 if (err == 0)
3637 {
3638 /* Find root class containg partitions info */
3639 gclass* pPartClass;
3640 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
3641 {
3642 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
3643 break;
3644 }
3645 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
3646 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
3647 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
3648 if (RT_SUCCESS(rc))
3649 {
3650 /* Find provider representing partition device */
3651 uint64_t cbOffset;
3652 uint64_t cbSize;
3653 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
3654 if (RT_SUCCESS(rc))
3655 {
3656 if (cbOffset != pPartDesc->offStartInVDisk)
3657 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3658 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3659 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3660 if (cbSize != pPartDesc->cbData)
3661 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3662 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3663 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3664 }
3665 else
3666 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3667 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
3668 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
3669 }
3670 geom_deletetree(&geomMesh);
3671 }
3672 else
3673 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
3674 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
3675 }
3676#elif defined(RT_OS_SOLARIS)
3677 RT_NOREF(hVol);
3678 dk_cinfo dkiDriveInfo;
3679 dk_cinfo dkiPartInfo;
3680 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
3681 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3682 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3683 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
3684 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3685 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3686 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
3687 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
3688 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
3689 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
3690 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
3691 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3692 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
3693 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3694 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
3695 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
3696 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
3697 else
3698 {
3699 uint64_t cbOffset = 0;
3700 uint64_t cbSize = 0;
3701 dk_gpt *pEfi = NULL;
3702 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
3703 if (idxEfiPart >= 0)
3704 {
3705 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
3706 {
3707 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
3708 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
3709 }
3710 else
3711 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3712 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3713 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3714 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
3715 efi_free(pEfi);
3716 }
3717 else
3718 {
3719 /*
3720 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
3721 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
3722 * real error or just no EFI table found. Therefore, let's try to obtain partition info
3723 * using another way. If there is an error, it returns errno which will be handled below.
3724 */
3725 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
3726 if (numPartition > NDKMAP)
3727 numPartition -= NDKMAP;
3728 if (numPartition != idxPartition)
3729 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3730 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3731 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3732 idxPartition, numPartition);
3733 else
3734 {
3735 dk_minfo_ext mediaInfo;
3736 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
3737 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3738 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3739 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3740 else
3741 {
3742 extpart_info extPartInfo;
3743 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
3744 {
3745 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
3746 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
3747 }
3748 else
3749 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3750 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3751 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3752 }
3753 }
3754 }
3755 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
3756 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3757 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3758 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3759 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
3760 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3761 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3762 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3763 }
3764#else
3765 RT_NOREF(hVol); /* PORTME */
3766#endif
3767 if (RT_SUCCESS(rc))
3768 {
3769 /*
3770 * Compare the first 32 sectors of the partition.
3771 *
3772 * This might not be conclusive, but for partitions formatted with the more
3773 * common file systems it should be as they have a superblock copy at or near
3774 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
3775 */
3776 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3777 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3778 if (pbSector1 != NULL)
3779 {
3780 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3781 /* Do the comparing, we repeat if it fails and the data might be volatile. */
3782 uint64_t uPrevCrc1 = 0;
3783 uint64_t uPrevCrc2 = 0;
3784 uint32_t cStable = 0;
3785 for (unsigned iTry = 0; iTry < 256; iTry++)
3786 {
3787 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3788 if (RT_SUCCESS(rc))
3789 {
3790 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3791 if (RT_SUCCESS(rc))
3792 {
3793 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3794 {
3795 rc = VERR_MISMATCH;
3796 /* Do data stability checks before repeating: */
3797 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
3798 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
3799 if ( uPrevCrc1 != uCrc1
3800 || uPrevCrc2 != uCrc2)
3801 cStable = 0;
3802 else if (++cStable > 4)
3803 break;
3804 uPrevCrc1 = uCrc1;
3805 uPrevCrc2 = uCrc2;
3806 continue;
3807 }
3808 rc = VINF_SUCCESS;
3809 }
3810 else
3811 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3812 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3813 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
3814 }
3815 else
3816 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3817 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
3818 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
3819 break;
3820 }
3821 if (rc == VERR_MISMATCH)
3822 {
3823 /* Find the first mismatching bytes: */
3824 size_t offMissmatch = 0;
3825 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
3826 offMissmatch++;
3827 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
3828 if (cStable > 0)
3829 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3830 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
3831 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
3832 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
3833 else
3834 {
3835 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
3836 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3837 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
3838 rc = -rc;
3839 }
3840 }
3841 RTMemTmpFree(pbSector1);
3842 }
3843 else
3844 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
3845 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
3846 pImage->pszFilename, cbToCompare * 2);
3847 }
3848 RTFileClose(hRawPart);
3849 return rc;
3850}
#ifdef RT_OS_WINDOWS
/**
 * Construct the device name for the given partition number.
 *
 * @returns VBox status code, error message has been set on failure.
 * @param   pImage            The VMDK image instance (error reporting).
 * @param   pszRawDrive       The whole-drive device path (for error messages).
 * @param   hRawDrive         Open handle to the whole drive; queried for its
 *                            storage device number.
 * @param   idxPartition      The partition number to encode in the name.
 * @param   ppszRawPartition  Where to return the allocated
 *                            "\\.\Harddisk<N>Partition<M>" string.
 */
static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
                                           char **ppszRawPartition)
{
    int rc = VINF_SUCCESS;
    DWORD cbReturned = 0;
    STORAGE_DEVICE_NUMBER DevNum;
    RT_ZERO(DevNum);
    if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
                        NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
    {
        /* Fix: the RTStrAPrintf status was previously ignored, so an out-of-memory
           condition returned VINF_SUCCESS with *ppszRawPartition set to NULL. */
        if (RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition) < 0)
            rc = VERR_NO_STR_MEMORY;
    }
    else
        rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
                       N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
                       pImage->pszFilename, pszRawDrive, GetLastError());
    return rc;
}
#endif /* RT_OS_WINDOWS */
3872/**
3873 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
3874 * 'Partitions' configuration value is present.
3875 *
3876 * @returns VBox status code, error message has been set on failure.
3877 *
3878 * @note Caller is assumed to clean up @a pRawDesc and release
3879 * @a *phVolToRelease.
3880 * @internal
3881 */
3882static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
3883 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
3884 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
3885 PRTDVMVOLUME phVolToRelease)
3886{
3887 *phVolToRelease = NIL_RTDVMVOLUME;
3888 /* Check sanity/understanding. */
3889 Assert(fPartitions);
3890 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
3891 /*
3892 * Allocate on descriptor for each volume up front.
3893 */
3894 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
3895 PVDISKRAWPARTDESC paPartDescs = NULL;
3896 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
3897 AssertRCReturn(rc, rc);
3898 /*
3899 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
3900 */
3901 uint32_t fPartitionsLeft = fPartitions;
3902 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
3903 for (uint32_t i = 0; i < cVolumes; i++)
3904 {
3905 /*
3906 * Get the next/first volume and release the current.
3907 */
3908 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
3909 if (i == 0)
3910 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
3911 else
3912 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
3913 if (RT_FAILURE(rc))
3914 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3915 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
3916 pImage->pszFilename, i, pszRawDrive, rc);
3917 uint32_t cRefs = RTDvmVolumeRelease(hVol);
3918 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
3919 *phVolToRelease = hVol = hVolNext;
3920 /*
3921 * Depending on the fPartitions selector and associated read-only mask,
3922 * the guest either gets read-write or read-only access (bits set)
3923 * or no access (selector bit clear, access directed to the VMDK).
3924 */
3925 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
3926 uint64_t offVolumeEndIgnored = 0;
3927 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
3928 if (RT_FAILURE(rc))
3929 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3930 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
3931 pImage->pszFilename, i, pszRawDrive, rc);
3932 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
3933 /* Note! The index must match IHostDrivePartition::number. */
3934 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
3935 if ( idxPartition < 32
3936 && (fPartitions & RT_BIT_32(idxPartition)))
3937 {
3938 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
3939 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
3940 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
3941 if (!fRelative)
3942 {
3943 /*
3944 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
3945 */
3946 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
3947 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
3948 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
3949 }
3950 else
3951 {
3952 /*
3953 * Relative means access the partition data via the device node for that
3954 * partition, allowing the sysadmin/OS to allow a user access to individual
3955 * partitions without necessarily being able to compromise the host OS.
3956 * Obviously, the creation of the VMDK requires read access to the main
3957 * device node for the drive, but that's a one-time thing and can be done
3958 * by the sysadmin. Here data starts at offset zero in the device node.
3959 */
3960 paPartDescs[i].offStartInDevice = 0;
3961#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
3962 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
3963 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
3964#elif defined(RT_OS_LINUX)
3965 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
3966 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
3967 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
3968#elif defined(RT_OS_WINDOWS)
3969 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
3970 AssertRCReturn(rc, rc);
3971#elif defined(RT_OS_SOLARIS)
3972 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
3973 {
3974 /*
3975 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
3976 * where X is the controller,
3977 * Y is target (SCSI device number),
3978 * Z is disk number,
3979 * K is partition number,
3980 * where p0 is the whole disk
3981 * p1-pN are the partitions of the disk
3982 */
3983 const char *pszRawDrivePath = pszRawDrive;
3984 char szDrivePath[RTPATH_MAX];
3985 size_t cbRawDrive = strlen(pszRawDrive);
3986 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
3987 {
3988 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
3989 szDrivePath[cbRawDrive - 2] = '\0';
3990 pszRawDrivePath = szDrivePath;
3991 }
3992 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
3993 }
3994 else /* GPT */
3995 {
3996 /*
3997 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
3998 * where X is the controller,
3999 * Y is target (SCSI device number),
4000 * Z is disk number,
4001 * K is partition number, zero based. Can be only from 0 to 6.
4002 * Thus, only partitions numbered 0 through 6 have device nodes.
4003 */
4004 if (idxPartition > 7)
4005 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4006 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4007 pImage->pszFilename, idxPartition, pszRawDrive);
4008 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4009 }
4010#else
4011 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4012#endif
4013 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4014 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4015 AssertRCReturn(rc, rc);
4016 }
4017 }
4018 else
4019 {
4020 /* Not accessible to the guest. */
4021 paPartDescs[i].offStartInDevice = 0;
4022 paPartDescs[i].pszRawDevice = NULL;
4023 }
4024 } /* for each volume */
4025 RTDvmVolumeRelease(hVol);
4026 *phVolToRelease = NIL_RTDVMVOLUME;
4027 /*
4028 * Check that we found all the partitions the user selected.
4029 */
4030 if (fPartitionsLeft)
4031 {
4032 char szLeft[3 * sizeof(fPartitions) * 8];
4033 size_t cchLeft = 0;
4034 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4035 if (fPartitionsLeft & RT_BIT_32(i))
4036 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4037 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4038 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4039 pImage->pszFilename, pszRawDrive, szLeft);
4040 }
4041 return VINF_SUCCESS;
4042}
4043/**
4044 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4045 * of the partition tables and associated padding areas when the 'Partitions'
4046 * configuration value is present.
4047 *
4048 * The guest is not allowed access to the partition tables, however it needs
4049 * them to be able to access the drive. So, create descriptors for each of the
4050 * tables and attach the current disk content. vmdkCreateRawImage() will later
4051 * write the content to the VMDK. Any changes the guest later makes to the
4052 * partition tables will then go to the VMDK copy, rather than the host drive.
4053 *
4054 * @returns VBox status code, error message has been set on failure.
4055 *
4056 * @note Caller is assumed to clean up @a pRawDesc
4057 * @internal
4058 */
4059static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4060 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4061{
4062 /*
4063 * Query the locations.
4064 */
4065 /* Determine how many locations there are: */
4066 size_t cLocations = 0;
4067 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4068 if (rc != VERR_BUFFER_OVERFLOW)
4069 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4070 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4071 pImage->pszFilename, pszRawDrive, rc);
4072 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4073 /* We can allocate the partition descriptors here to save an indentation level. */
4074 PVDISKRAWPARTDESC paPartDescs = NULL;
4075 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4076 AssertRCReturn(rc, rc);
4077 /* Allocate the result table and repeat the location table query: */
4078 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4079 if (!paLocations)
4080 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4081 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4082 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4083 if (RT_SUCCESS(rc))
4084 {
4085 /*
4086 * Translate them into descriptors.
4087 *
4088 * We restrict the amount of partition alignment padding to 4MiB as more
4089 * will just be a waste of space. The use case for including the padding
4090 * are older boot loaders and boot manager (including one by a team member)
4091 * that put data and code in the 62 sectors between the MBR and the first
4092 * partition (total of 63). Later CHS was abandoned and partitions started
4093 * being aligned on power of two sector boundaries (typically 64KiB or
4094 * 1MiB depending on the media size).
4095 */
4096 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4097 {
4098 Assert(paLocations[i].cb > 0);
4099 if (paLocations[i].cb <= _64M)
4100 {
4101 /* Create the partition descriptor entry: */
4102 //paPartDescs[i].pszRawDevice = NULL;
4103 //paPartDescs[i].offStartInDevice = 0;
4104 //paPartDescs[i].uFlags = 0;
4105 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4106 paPartDescs[i].cbData = paLocations[i].cb;
4107 if (paPartDescs[i].cbData < _4M)
4108 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4109 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4110 if (paPartDescs[i].pvPartitionData)
4111 {
4112 /* Read the content from the drive: */
4113 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4114 (size_t)paPartDescs[i].cbData, NULL);
4115 if (RT_SUCCESS(rc))
4116 {
4117 /* Do we have custom boot sector code? */
4118 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4119 {
4120 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4121 Instead we fail as we weren't able to do what the user requested us to do.
4122 Better if the user knows than starts questioning why the guest isn't
4123 booting as expected. */
4124 if (cbBootSector <= paPartDescs[i].cbData)
4125 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4126 else
4127 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4128 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4129 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4130 }
4131 }
4132 else
4133 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4134 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4135 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4136 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4137 }
4138 else
4139 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4140 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4141 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4142 }
4143 else
4144 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4145 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4146 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4147 }
4148 }
4149 else
4150 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4151 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4152 pImage->pszFilename, pszRawDrive, rc);
4153 RTMemFree(paLocations);
4154 return rc;
4155}
4156/**
4157 * Opens the volume manager for the raw drive when in selected-partition mode.
4158 *
4159 * @param pImage The VMDK image (for errors).
4160 * @param hRawDrive The raw drive handle.
4161 * @param pszRawDrive The raw drive device path (for errors).
4162 * @param cbSector The sector size.
4163 * @param phVolMgr Where to return the handle to the volume manager on
4164 * success.
4165 * @returns VBox status code, errors have been reported.
4166 * @internal
4167 */
4168static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4169{
4170 *phVolMgr = NIL_RTDVM;
4171 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4172 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4173 if (RT_FAILURE(rc))
4174 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4175 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4176 pImage->pszFilename, pszRawDrive, rc);
4177 RTDVM hVolMgr = NIL_RTDVM;
4178 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4179 RTVfsFileRelease(hVfsFile);
4180 if (RT_FAILURE(rc))
4181 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4182 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4183 pImage->pszFilename, pszRawDrive, rc);
4184 rc = RTDvmMapOpen(hVolMgr);
4185 if (RT_SUCCESS(rc))
4186 {
4187 *phVolMgr = hVolMgr;
4188 return VINF_SUCCESS;
4189 }
4190 RTDvmRelease(hVolMgr);
4191 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4192 pImage->pszFilename, pszRawDrive, rc);
4193}
4194/**
4195 * Opens the raw drive device and get the sizes for it.
4196 *
4197 * @param pImage The image (for error reporting).
4198 * @param pszRawDrive The device/whatever to open.
4199 * @param phRawDrive Where to return the file handle.
4200 * @param pcbRawDrive Where to return the size.
4201 * @param pcbSector Where to return the sector size.
4202 * @returns IPRT status code, errors have been reported.
4203 * @internal
4204 */
4205static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4206 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4207{
4208 /*
4209 * Open the device for the raw drive.
4210 */
4211 RTFILE hRawDrive = NIL_RTFILE;
4212 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4213 if (RT_FAILURE(rc))
4214 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4215 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4216 pImage->pszFilename, pszRawDrive, rc);
4217 /*
4218 * Get the sector size.
4219 */
4220 uint32_t cbSector = 0;
4221 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4222 if (RT_SUCCESS(rc))
4223 {
4224 /* sanity checks */
4225 if ( cbSector >= 512
4226 && cbSector <= _64K
4227 && RT_IS_POWER_OF_TWO(cbSector))
4228 {
4229 /*
4230 * Get the size.
4231 */
4232 uint64_t cbRawDrive = 0;
4233 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4234 if (RT_SUCCESS(rc))
4235 {
4236 /* Check whether cbSize is actually sensible. */
4237 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4238 {
4239 *phRawDrive = hRawDrive;
4240 *pcbRawDrive = cbRawDrive;
4241 *pcbSector = cbSector;
4242 return VINF_SUCCESS;
4243 }
4244 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4245 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4246 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4247 }
4248 else
4249 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4250 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4251 pImage->pszFilename, pszRawDrive, rc);
4252 }
4253 else
4254 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4255 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4256 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4257 }
4258 else
4259 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4260 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4261 pImage->pszFilename, pszRawDrive, rc);
4262 RTFileClose(hRawDrive);
4263 return rc;
4264}
4265/**
 * Reads the raw disk configuration, leaving initialization and cleanup to the
4267 * caller (regardless of return status).
4268 *
4269 * @returns VBox status code, errors properly reported.
4270 * @internal
4271 */
4272static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4273 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4274 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4275 char **ppszFreeMe)
4276{
4277 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4278 if (!pImgCfg)
4279 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4280 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4281 /*
4282 * RawDrive = path
4283 */
4284 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4285 if (RT_FAILURE(rc))
4286 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4287 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4288 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4289 /*
4290 * Partitions=n[r][,...]
4291 */
4292 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4293 *pfPartitions = *pfPartitionsReadOnly = 0;
4294 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4295 if (RT_SUCCESS(rc))
4296 {
4297 char *psz = *ppszFreeMe;
4298 while (*psz != '\0')
4299 {
4300 char *pszNext;
4301 uint32_t u32;
4302 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4303 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4304 rc = -rc;
4305 if (RT_FAILURE(rc))
4306 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4307 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4308 pImage->pszFilename, rc, psz);
4309 if (u32 >= cMaxPartitionBits)
4310 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4311 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4312 pImage->pszFilename, u32, cMaxPartitionBits);
4313 *pfPartitions |= RT_BIT_32(u32);
4314 psz = pszNext;
4315 if (*psz == 'r')
4316 {
4317 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4318 psz++;
4319 }
4320 if (*psz == ',')
4321 psz++;
4322 else if (*psz != '\0')
4323 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4324 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4325 pImage->pszFilename, psz);
4326 }
4327 RTStrFree(*ppszFreeMe);
4328 *ppszFreeMe = NULL;
4329 }
4330 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4331 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4332 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4333 /*
4334 * BootSector=base64
4335 */
4336 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4337 if (RT_SUCCESS(rc))
4338 {
4339 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4340 if (cbBootSector < 0)
4341 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4342 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4343 pImage->pszFilename, *ppszRawDrive);
4344 if (cbBootSector == 0)
4345 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4346 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4347 pImage->pszFilename, *ppszRawDrive);
4348 if (cbBootSector > _4M) /* this is just a preliminary max */
4349 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4350 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4351 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4352 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4353 however, bird disagrees and thinks the user should be told that what
4354 he/she/it tries to do isn't possible. There should be less head
4355 scratching this way when the guest doesn't do the expected thing. */
4356 if (!*pfPartitions)
4357 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4358 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4359 pImage->pszFilename, *ppszRawDrive);
4360 *pcbBootSector = (size_t)cbBootSector;
4361 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4362 if (!*ppvBootSector)
4363 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4364 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4365 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4366 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4367 if (RT_FAILURE(rc))
4368 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4369 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4370 pImage->pszFilename, *ppszRawDrive, rc);
4371 RTStrFree(*ppszFreeMe);
4372 *ppszFreeMe = NULL;
4373 }
4374 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4375 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4376 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4377 /*
4378 * Relative=0/1
4379 */
4380 *pfRelative = false;
4381 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4382 if (RT_SUCCESS(rc))
4383 {
4384 if (!*pfPartitions && *pfRelative != false)
4385 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4386 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4387 pImage->pszFilename);
4388#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
4389 if (*pfRelative == true)
4390 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4391 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4392 pImage->pszFilename);
4393#endif
4394 }
4395 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4396 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4397 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4398 else
4399#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4400 *pfRelative = true;
4401#else
4402 *pfRelative = false;
4403#endif
4404 return VINF_SUCCESS;
4405}
4406/**
4407 * Creates a raw drive (nee disk) descriptor.
4408 *
 * This was originally done in VBoxInternalManage.cpp, but was copied (not moved)
4410 * here much later. That's one of the reasons why we produce a descriptor just
4411 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
4412 *
4413 * @returns VBox status code.
4414 * @param pImage The image.
4415 * @param ppRaw Where to return the raw drive descriptor. Caller must
4416 * free it using vmdkRawDescFree regardless of the status
4417 * code.
4418 * @internal
4419 */
4420static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4421{
4422 /* Make sure it's NULL. */
4423 *ppRaw = NULL;
4424 /*
4425 * Read the configuration.
4426 */
4427 char *pszRawDrive = NULL;
4428 uint32_t fPartitions = 0; /* zero if whole-drive */
4429 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4430 void *pvBootSector = NULL;
4431 size_t cbBootSector = 0;
4432 bool fRelative = false;
4433 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4434 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4435 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4436 RTStrFree(pszFreeMe);
4437 if (RT_SUCCESS(rc))
4438 {
4439 /*
4440 * Open the device, getting the sector size and drive size.
4441 */
4442 uint64_t cbSize = 0;
4443 uint32_t cbSector = 0;
4444 RTFILE hRawDrive = NIL_RTFILE;
4445 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4446 if (RT_SUCCESS(rc))
4447 {
4448 /*
4449 * Create the raw-drive descriptor
4450 */
4451 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4452 if (pRawDesc)
4453 {
4454 pRawDesc->szSignature[0] = 'R';
4455 pRawDesc->szSignature[1] = 'A';
4456 pRawDesc->szSignature[2] = 'W';
4457 //pRawDesc->szSignature[3] = '\0';
4458 if (!fPartitions)
4459 {
4460 /*
4461 * It's simple for when doing the whole drive.
4462 */
4463 pRawDesc->uFlags = VDISKRAW_DISK;
4464 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4465 }
4466 else
4467 {
4468 /*
4469 * In selected partitions mode we've got a lot more work ahead of us.
4470 */
4471 pRawDesc->uFlags = VDISKRAW_NORMAL;
4472 //pRawDesc->pszRawDisk = NULL;
4473 //pRawDesc->cPartDescs = 0;
4474 //pRawDesc->pPartDescs = NULL;
4475 /* We need to parse the partition map to complete the descriptor: */
4476 RTDVM hVolMgr = NIL_RTDVM;
4477 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4478 if (RT_SUCCESS(rc))
4479 {
4480 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4481 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4482 || enmFormatType == RTDVMFORMATTYPE_GPT)
4483 {
4484 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4485 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4486 /* Add copies of the partition tables: */
4487 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4488 pvBootSector, cbBootSector);
4489 if (RT_SUCCESS(rc))
4490 {
4491 /* Add descriptors for the partitions/volumes, indicating which
4492 should be accessible and how to access them: */
4493 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4494 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4495 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4496 RTDvmVolumeRelease(hVolRelease);
4497 /* Finally, sort the partition and check consistency (overlaps, etc): */
4498 if (RT_SUCCESS(rc))
4499 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4500 }
4501 }
4502 else
4503 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4504 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
4505 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
4506 RTDvmRelease(hVolMgr);
4507 }
4508 }
4509 if (RT_SUCCESS(rc))
4510 {
4511 /*
4512 * We succeeded.
4513 */
4514 *ppRaw = pRawDesc;
4515 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4516 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4517 if (pRawDesc->cPartDescs)
4518 {
4519 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4520 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4521 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4522 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4523 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4524 }
4525 }
4526 else
4527 vmdkRawDescFree(pRawDesc);
4528 }
4529 else
4530 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4531 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
4532 pImage->pszFilename, sizeof(*pRawDesc));
4533 RTFileClose(hRawDrive);
4534 }
4535 }
4536 RTStrFree(pszRawDrive);
4537 RTMemFree(pvBootSector);
4538 return rc;
4539}
4540/**
4541 * Internal: create VMDK images for raw disk/partition access.
4542 */
4543static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4544 uint64_t cbSize)
4545{
4546 int rc = VINF_SUCCESS;
4547 PVMDKEXTENT pExtent;
4548 if (pRaw->uFlags & VDISKRAW_DISK)
4549 {
4550 /* Full raw disk access. This requires setting up a descriptor
4551 * file and open the (flat) raw disk. */
4552 rc = vmdkCreateExtents(pImage, 1);
4553 if (RT_FAILURE(rc))
4554 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4555 pExtent = &pImage->pExtents[0];
4556 /* Create raw disk descriptor file. */
4557 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4558 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4559 true /* fCreate */));
4560 if (RT_FAILURE(rc))
4561 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4562 /* Set up basename for extent description. Cannot use StrDup. */
4563 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4564 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4565 if (!pszBasename)
4566 return VERR_NO_MEMORY;
4567 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4568 pExtent->pszBasename = pszBasename;
4569 /* For raw disks the full name is identical to the base name. */
4570 pExtent->pszFullname = RTStrDup(pszBasename);
4571 if (!pExtent->pszFullname)
4572 return VERR_NO_MEMORY;
4573 pExtent->enmType = VMDKETYPE_FLAT;
4574 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4575 pExtent->uSectorOffset = 0;
4576 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4577 pExtent->fMetaDirty = false;
4578 /* Open flat image, the raw disk. */
4579 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4580 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4581 false /* fCreate */));
4582 if (RT_FAILURE(rc))
4583 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4584 }
4585 else
4586 {
4587 /* Raw partition access. This requires setting up a descriptor
4588 * file, write the partition information to a flat extent and
4589 * open all the (flat) raw disk partitions. */
4590 /* First pass over the partition data areas to determine how many
4591 * extents we need. One data area can require up to 2 extents, as
4592 * it might be necessary to skip over unpartitioned space. */
4593 unsigned cExtents = 0;
4594 uint64_t uStart = 0;
4595 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4596 {
4597 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4598 if (uStart > pPart->offStartInVDisk)
4599 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4600 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4601 if (uStart < pPart->offStartInVDisk)
4602 cExtents++;
4603 uStart = pPart->offStartInVDisk + pPart->cbData;
4604 cExtents++;
4605 }
4606 /* Another extent for filling up the rest of the image. */
4607 if (uStart != cbSize)
4608 cExtents++;
4609 rc = vmdkCreateExtents(pImage, cExtents);
4610 if (RT_FAILURE(rc))
4611 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4612 /* Create raw partition descriptor file. */
4613 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4614 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4615 true /* fCreate */));
4616 if (RT_FAILURE(rc))
4617 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4618 /* Create base filename for the partition table extent. */
4619 /** @todo remove fixed buffer without creating memory leaks. */
4620 char pszPartition[1024];
4621 const char *pszBase = RTPathFilename(pImage->pszFilename);
4622 const char *pszSuff = RTPathSuffix(pszBase);
4623 if (pszSuff == NULL)
4624 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4625 char *pszBaseBase = RTStrDup(pszBase);
4626 if (!pszBaseBase)
4627 return VERR_NO_MEMORY;
4628 RTPathStripSuffix(pszBaseBase);
4629 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4630 pszBaseBase, pszSuff);
4631 RTStrFree(pszBaseBase);
4632 /* Second pass over the partitions, now define all extents. */
4633 uint64_t uPartOffset = 0;
4634 cExtents = 0;
4635 uStart = 0;
4636 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4637 {
4638 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4639 pExtent = &pImage->pExtents[cExtents++];
4640 if (uStart < pPart->offStartInVDisk)
4641 {
4642 pExtent->pszBasename = NULL;
4643 pExtent->pszFullname = NULL;
4644 pExtent->enmType = VMDKETYPE_ZERO;
4645 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4646 pExtent->uSectorOffset = 0;
4647 pExtent->enmAccess = VMDKACCESS_READWRITE;
4648 pExtent->fMetaDirty = false;
4649 /* go to next extent */
4650 pExtent = &pImage->pExtents[cExtents++];
4651 }
4652 uStart = pPart->offStartInVDisk + pPart->cbData;
4653 if (pPart->pvPartitionData)
4654 {
4655 /* Set up basename for extent description. Can't use StrDup. */
4656 size_t cbBasename = strlen(pszPartition) + 1;
4657 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4658 if (!pszBasename)
4659 return VERR_NO_MEMORY;
4660 memcpy(pszBasename, pszPartition, cbBasename);
4661 pExtent->pszBasename = pszBasename;
4662 /* Set up full name for partition extent. */
4663 char *pszDirname = RTStrDup(pImage->pszFilename);
4664 if (!pszDirname)
4665 return VERR_NO_STR_MEMORY;
4666 RTPathStripFilename(pszDirname);
4667 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4668 RTStrFree(pszDirname);
4669 if (!pszFullname)
4670 return VERR_NO_STR_MEMORY;
4671 pExtent->pszFullname = pszFullname;
4672 pExtent->enmType = VMDKETYPE_FLAT;
4673 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4674 pExtent->uSectorOffset = uPartOffset;
4675 pExtent->enmAccess = VMDKACCESS_READWRITE;
4676 pExtent->fMetaDirty = false;
4677 /* Create partition table flat image. */
4678 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4679 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4680 true /* fCreate */));
4681 if (RT_FAILURE(rc))
4682 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4683 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4684 VMDK_SECTOR2BYTE(uPartOffset),
4685 pPart->pvPartitionData,
4686 pPart->cbData);
4687 if (RT_FAILURE(rc))
4688 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4689 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4690 }
4691 else
4692 {
4693 if (pPart->pszRawDevice)
4694 {
4695 /* Set up basename for extent descr. Can't use StrDup. */
4696 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4697 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4698 if (!pszBasename)
4699 return VERR_NO_MEMORY;
4700 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4701 pExtent->pszBasename = pszBasename;
4702 /* For raw disks full name is identical to base name. */
4703 pExtent->pszFullname = RTStrDup(pszBasename);
4704 if (!pExtent->pszFullname)
4705 return VERR_NO_MEMORY;
4706 pExtent->enmType = VMDKETYPE_FLAT;
4707 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4708 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4709 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4710 pExtent->fMetaDirty = false;
4711 /* Open flat image, the raw partition. */
4712 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4713 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4714 false /* fCreate */));
4715 if (RT_FAILURE(rc))
4716 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4717 }
4718 else
4719 {
4720 pExtent->pszBasename = NULL;
4721 pExtent->pszFullname = NULL;
4722 pExtent->enmType = VMDKETYPE_ZERO;
4723 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4724 pExtent->uSectorOffset = 0;
4725 pExtent->enmAccess = VMDKACCESS_READWRITE;
4726 pExtent->fMetaDirty = false;
4727 }
4728 }
4729 }
4730 /* Another extent for filling up the rest of the image. */
4731 if (uStart != cbSize)
4732 {
4733 pExtent = &pImage->pExtents[cExtents++];
4734 pExtent->pszBasename = NULL;
4735 pExtent->pszFullname = NULL;
4736 pExtent->enmType = VMDKETYPE_ZERO;
4737 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4738 pExtent->uSectorOffset = 0;
4739 pExtent->enmAccess = VMDKACCESS_READWRITE;
4740 pExtent->fMetaDirty = false;
4741 }
4742 }
4743 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4744 (pRaw->uFlags & VDISKRAW_DISK) ?
4745 "fullDevice" : "partitionedDevice");
4746 if (RT_FAILURE(rc))
4747 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4748 return rc;
4749}
4750/**
4751 * Internal: create a regular (i.e. file-backed) VMDK image.
4752 */
4753static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4754 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4755 unsigned uPercentStart, unsigned uPercentSpan)
4756{
4757 int rc = VINF_SUCCESS;
4758 unsigned cExtents = 1;
4759 uint64_t cbOffset = 0;
4760 uint64_t cbRemaining = cbSize;
4761 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4762 {
4763 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4764 /* Do proper extent computation: need one smaller extent if the total
4765 * size isn't evenly divisible by the split size. */
4766 if (cbSize % VMDK_2G_SPLIT_SIZE)
4767 cExtents++;
4768 }
4769 rc = vmdkCreateExtents(pImage, cExtents);
4770 if (RT_FAILURE(rc))
4771 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4772 /* Basename strings needed for constructing the extent names. */
4773 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4774 AssertPtr(pszBasenameSubstr);
4775 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4776 /* Create separate descriptor file if necessary. */
4777 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4778 {
4779 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4780 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4781 true /* fCreate */));
4782 if (RT_FAILURE(rc))
4783 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4784 }
4785 else
4786 pImage->pFile = NULL;
4787 /* Set up all extents. */
4788 for (unsigned i = 0; i < cExtents; i++)
4789 {
4790 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4791 uint64_t cbExtent = cbRemaining;
4792 /* Set up fullname/basename for extent description. Cannot use StrDup
4793 * for basename, as it is not guaranteed that the memory can be freed
4794 * with RTMemTmpFree, which must be used as in other code paths
4795 * StrDup is not usable. */
4796 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4797 {
4798 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4799 if (!pszBasename)
4800 return VERR_NO_MEMORY;
4801 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4802 pExtent->pszBasename = pszBasename;
4803 }
4804 else
4805 {
4806 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
4807 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
4808 RTPathStripSuffix(pszBasenameBase);
4809 char *pszTmp;
4810 size_t cbTmp;
4811 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4812 {
4813 if (cExtents == 1)
4814 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
4815 pszBasenameSuff);
4816 else
4817 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
4818 i+1, pszBasenameSuff);
4819 }
4820 else
4821 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
4822 pszBasenameSuff);
4823 RTStrFree(pszBasenameBase);
4824 if (!pszTmp)
4825 return VERR_NO_STR_MEMORY;
4826 cbTmp = strlen(pszTmp) + 1;
4827 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
4828 if (!pszBasename)
4829 {
4830 RTStrFree(pszTmp);
4831 return VERR_NO_MEMORY;
4832 }
4833 memcpy(pszBasename, pszTmp, cbTmp);
4834 RTStrFree(pszTmp);
4835 pExtent->pszBasename = pszBasename;
4836 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4837 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
4838 }
4839 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4840 if (!pszBasedirectory)
4841 return VERR_NO_STR_MEMORY;
4842 RTPathStripFilename(pszBasedirectory);
4843 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4844 RTStrFree(pszBasedirectory);
4845 if (!pszFullname)
4846 return VERR_NO_STR_MEMORY;
4847 pExtent->pszFullname = pszFullname;
4848 /* Create file for extent. */
4849 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4850 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4851 true /* fCreate */));
4852 if (RT_FAILURE(rc))
4853 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4854 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4855 {
4856 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
4857 0 /* fFlags */, pIfProgress,
4858 uPercentStart + cbOffset * uPercentSpan / cbSize,
4859 cbExtent * uPercentSpan / cbSize);
4860 if (RT_FAILURE(rc))
4861 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
4862 }
4863 /* Place descriptor file information (where integrated). */
4864 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4865 {
4866 pExtent->uDescriptorSector = 1;
4867 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4868 /* The descriptor is part of the (only) extent. */
4869 pExtent->pDescData = pImage->pDescData;
4870 pImage->pDescData = NULL;
4871 }
4872 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4873 {
4874 uint64_t cSectorsPerGDE, cSectorsPerGD;
4875 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4876 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
4877 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4878 pExtent->cGTEntries = 512;
4879 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4880 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4881 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4882 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4883 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4884 {
4885 /* The spec says version is 1 for all VMDKs, but the vast
4886 * majority of streamOptimized VMDKs actually contain
4887 * version 3 - so go with the majority. Both are accepted. */
4888 pExtent->uVersion = 3;
4889 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4890 }
4891 }
4892 else
4893 {
4894 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4895 pExtent->enmType = VMDKETYPE_VMFS;
4896 else
4897 pExtent->enmType = VMDKETYPE_FLAT;
4898 }
4899 pExtent->enmAccess = VMDKACCESS_READWRITE;
4900 pExtent->fUncleanShutdown = true;
4901 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4902 pExtent->uSectorOffset = 0;
4903 pExtent->fMetaDirty = true;
4904 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4905 {
4906 /* fPreAlloc should never be false because VMware can't use such images. */
4907 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4908 RT_MAX( pExtent->uDescriptorSector
4909 + pExtent->cDescriptorSectors,
4910 1),
4911 true /* fPreAlloc */);
4912 if (RT_FAILURE(rc))
4913 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4914 }
4915 cbOffset += cbExtent;
4916 if (RT_SUCCESS(rc))
4917 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
4918 cbRemaining -= cbExtent;
4919 }
4920 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4921 {
4922 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
4923 * controller type is set in an image. */
4924 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
4925 if (RT_FAILURE(rc))
4926 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
4927 }
4928 const char *pszDescType = NULL;
4929 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4930 {
4931 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4932 pszDescType = "vmfs";
4933 else
4934 pszDescType = (cExtents == 1)
4935 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4936 }
4937 else
4938 {
4939 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4940 pszDescType = "streamOptimized";
4941 else
4942 {
4943 pszDescType = (cExtents == 1)
4944 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4945 }
4946 }
4947 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4948 pszDescType);
4949 if (RT_FAILURE(rc))
4950 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4951 return rc;
4952}
4953/**
4954 * Internal: Create a real stream optimized VMDK using only linear writes.
4955 */
4956static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
4957{
4958 int rc = vmdkCreateExtents(pImage, 1);
4959 if (RT_FAILURE(rc))
4960 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4961 /* Basename strings needed for constructing the extent names. */
4962 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4963 AssertPtr(pszBasenameSubstr);
4964 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4965 /* No separate descriptor file. */
4966 pImage->pFile = NULL;
4967 /* Set up all extents. */
4968 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4969 /* Set up fullname/basename for extent description. Cannot use StrDup
4970 * for basename, as it is not guaranteed that the memory can be freed
4971 * with RTMemTmpFree, which must be used as in other code paths
4972 * StrDup is not usable. */
4973 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4974 if (!pszBasename)
4975 return VERR_NO_MEMORY;
4976 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4977 pExtent->pszBasename = pszBasename;
4978 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
4979 RTPathStripFilename(pszBasedirectory);
4980 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
4981 RTStrFree(pszBasedirectory);
4982 if (!pszFullname)
4983 return VERR_NO_STR_MEMORY;
4984 pExtent->pszFullname = pszFullname;
4985 /* Create file for extent. Make it write only, no reading allowed. */
4986 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4987 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4988 true /* fCreate */)
4989 & ~RTFILE_O_READ);
4990 if (RT_FAILURE(rc))
4991 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4992 /* Place descriptor file information. */
4993 pExtent->uDescriptorSector = 1;
4994 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4995 /* The descriptor is part of the (only) extent. */
4996 pExtent->pDescData = pImage->pDescData;
4997 pImage->pDescData = NULL;
4998 uint64_t cSectorsPerGDE, cSectorsPerGD;
4999 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5000 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5001 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5002 pExtent->cGTEntries = 512;
5003 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5004 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5005 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5006 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5007 /* The spec says version is 1 for all VMDKs, but the vast
5008 * majority of streamOptimized VMDKs actually contain
5009 * version 3 - so go with the majority. Both are accepted. */
5010 pExtent->uVersion = 3;
5011 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5012 pExtent->fFooter = true;
5013 pExtent->enmAccess = VMDKACCESS_READONLY;
5014 pExtent->fUncleanShutdown = false;
5015 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5016 pExtent->uSectorOffset = 0;
5017 pExtent->fMetaDirty = true;
5018 /* Create grain directory, without preallocating it straight away. It will
5019 * be constructed on the fly when writing out the data and written when
5020 * closing the image. The end effect is that the full grain directory is
5021 * allocated, which is a requirement of the VMDK specs. */
5022 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5023 false /* fPreAlloc */);
5024 if (RT_FAILURE(rc))
5025 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5026 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5027 "streamOptimized");
5028 if (RT_FAILURE(rc))
5029 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5030 return rc;
5031}
5032/**
5033 * Initializes the UUID fields in the DDB.
5034 *
5035 * @returns VBox status code.
5036 * @param pImage The VMDK image instance.
5037 */
5038static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5039{
5040 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5041 if (RT_SUCCESS(rc))
5042 {
5043 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5044 if (RT_SUCCESS(rc))
5045 {
5046 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5047 &pImage->ModificationUuid);
5048 if (RT_SUCCESS(rc))
5049 {
5050 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5051 &pImage->ParentModificationUuid);
5052 if (RT_FAILURE(rc))
5053 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5054 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5055 }
5056 else
5057 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5058 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5059 }
5060 else
5061 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5062 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5063 }
5064 else
5065 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5066 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5067 return rc;
5068}
5069/**
5070 * Internal: The actual code for creating any VMDK variant currently in
5071 * existence on hosted environments.
5072 */
/**
 * Internal: The actual code for creating any VMDK variant currently in
 * existence on hosted environments.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance being created.
 * @param   cbSize          Logical disk size in bytes.
 * @param   uImageFlags     VD_IMAGE_FLAGS_* / VD_VMDK_IMAGE_FLAGS_* selecting the variant.
 * @param   pszComment      Optional image comment, may be NULL.
 * @param   pPCHSGeometry   Physical CHS geometry; ignored when any component is 0.
 * @param   pLCHSGeometry   Logical CHS geometry; ignored when any component is 0.
 * @param   pUuid           UUID to assign to the new image.
 * @param   pIfProgress     Optional progress interface, may be NULL.
 * @param   uPercentStart   Progress range start.
 * @param   uPercentSpan    Progress range span.
 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PVDINTERFACEPROGRESS pIfProgress,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    pImage->uImageFlags = uImageFlags;
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
    int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                                  &pImage->Descriptor);
    if (RT_SUCCESS(rc))
    {
        /* Dispatch to the variant-specific creation routine. */
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        {
            /* Raw disk image (includes raw partition). */
            PVDISKRAW pRaw = NULL;
            rc = vmdkMakeRawDescriptor(pImage, &pRaw);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could get raw descriptor for '%s'"), pImage->pszFilename);
            rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
            vmdkRawDescFree(pRaw);
        }
        else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* Stream optimized sparse image (monolithic). */
            rc = vmdkCreateStreamImage(pImage, cbSize);
        }
        else
        {
            /* Regular fixed or sparse image (monolithic or split). */
            rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                        pIfProgress, uPercentStart,
                                        uPercentSpan * 95 / 100);
        }
        if (RT_SUCCESS(rc))
        {
            vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
            pImage->cbSize = cbSize;
            /* Register every created extent in the descriptor. */
            for (unsigned i = 0; i < pImage->cExtents; i++)
            {
                PVMDKEXTENT pExtent = &pImage->pExtents[i];
                rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                                       pExtent->cNominalSectors, pExtent->enmType,
                                       pExtent->pszBasename, pExtent->uSectorOffset);
                if (RT_FAILURE(rc))
                {
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
                    break;
                }
            }
            if (RT_SUCCESS(rc))
                vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
            /* Geometry is only stored when fully specified (no zero component). */
            if (   RT_SUCCESS(rc)
                && pPCHSGeometry->cCylinders != 0
                && pPCHSGeometry->cHeads != 0
                && pPCHSGeometry->cSectors != 0)
                rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
            if (   RT_SUCCESS(rc)
                && pLCHSGeometry->cCylinders != 0
                && pLCHSGeometry->cHeads != 0
                && pLCHSGeometry->cSectors != 0)
                rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
            pImage->LCHSGeometry = *pLCHSGeometry;
            pImage->PCHSGeometry = *pPCHSGeometry;
            pImage->ImageUuid = *pUuid;
            /* A freshly created image has no parent and no modifications yet. */
            RTUuidClear(&pImage->ParentUuid);
            RTUuidClear(&pImage->ModificationUuid);
            RTUuidClear(&pImage->ParentModificationUuid);
            if (RT_SUCCESS(rc))
                rc = vmdkCreateImageDdbUuidsInit(pImage);
            if (RT_SUCCESS(rc))
                rc = vmdkAllocateGrainTableCache(pImage);
            if (RT_SUCCESS(rc))
            {
                rc = vmdkSetImageComment(pImage, pszComment);
                if (RT_FAILURE(rc))
                    rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
            }
            if (RT_SUCCESS(rc))
            {
                vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* streamOptimized is a bit special, we cannot trigger the flush
                     * until all data has been written. So we write the necessary
                     * information explicitly. */
                    pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
                                                                                         - pImage->Descriptor.aLines[0], 512));
                    rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        rc = vmdkWriteDescriptor(pImage, NULL);
                        if (RT_FAILURE(rc))
                            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
                    }
                    else
                        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
                }
                else
                    rc = vmdkFlushImage(pImage, NULL);
            }
        }
    }
    else
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
    if (RT_SUCCESS(rc))
    {
        /* Publish the single raw region describing the whole disk. */
        PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
        pImage->RegionList.fFlags = 0;
        pImage->RegionList.cRegions = 1;
        pRegion->offRegion = 0; /* Disk start. */
        pRegion->cbBlock = 512;
        pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
        pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
        pRegion->cbData = 512;
        pRegion->cbMetadata = 0;
        pRegion->cRegionBlocksOrBytes = pImage->cbSize;
        vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
    }
    else
        /* Tear down on failure; keep existing files when the failure was
         * that the image already exists. */
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
    return rc;
}
5199/**
5200 * Internal: Update image comment.
5201 */
5202static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5203{
5204 char *pszCommentEncoded = NULL;
5205 if (pszComment)
5206 {
5207 pszCommentEncoded = vmdkEncodeString(pszComment);
5208 if (!pszCommentEncoded)
5209 return VERR_NO_MEMORY;
5210 }
5211 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5212 "ddb.comment", pszCommentEncoded);
5213 if (pszCommentEncoded)
5214 RTStrFree(pszCommentEncoded);
5215 if (RT_FAILURE(rc))
5216 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5217 return VINF_SUCCESS;
5218}
5219/**
5220 * Internal. Clear the grain table buffer for real stream optimized writing.
5221 */
5222static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5223{
5224 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5225 for (uint32_t i = 0; i < cCacheLines; i++)
5226 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5227 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5228}
5229/**
5230 * Internal. Flush the grain table buffer for real stream optimized writing.
5231 */
/**
 * Internal. Flush the grain table buffer for real stream optimized writing.
 *
 * Writes a grain table marker followed by the little-endian grain table at
 * the current (sector-aligned) append position, and records the table's
 * sector in the grain directory entry @a uGDEntry. All-zero tables are
 * skipped entirely.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image instance.
 * @param   pExtent     The (streamOptimized) extent being written.
 * @param   uGDEntry    Grain directory entry to store the table sector in;
 *                      must still be zero.
 */
static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                             uint32_t uGDEntry)
{
    int rc = VINF_SUCCESS;
    uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
    /* VMware does not write out completely empty grain tables in the case
     * of streamOptimized images, which according to my interpretation of
     * the VMDK 1.1 spec is bending the rules. Since they do it and we can
     * handle it without problems do it the same way and save some bytes. */
    bool fAllZero = true;
    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Scan this cache line for any non-zero grain table entry; one hit
         * is enough to know the table must be written out. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            if (*pGTTmp)
            {
                fAllZero = false;
                break;
            }
        if (!fAllZero)
            break;
    }
    if (fAllZero)
        return VINF_SUCCESS;
    uint64_t uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);
    /* Grain table marker. */
    uint8_t aMarker[512];
    PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
    memset(pMarker, '\0', sizeof(aMarker));
    pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
    pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
    /* NOTE(review): a failed marker write is only asserted, not propagated;
     * the subsequent table writes would still run — confirm intentional. */
    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                aMarker, sizeof(aMarker));
    AssertRC(rc);
    uFileOffset += 512;
    /* Sanity: grain directory must exist and this entry must not be set yet. */
    if (!pExtent->pGD || pExtent->pGD[uGDEntry])
        return VERR_INTERNAL_ERROR;
    pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
    for (uint32_t i = 0; i < cCacheLines; i++)
    {
        /* Convert the grain table to little endian in place, as it will not
         * be used at all after this function has been called. */
        uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
        for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
            *pGTTmp = RT_H2LE_U32(*pGTTmp);
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                    &pImage->pGTCache->aGTCache[i].aGTData[0],
                                    VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
        uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
        if (RT_FAILURE(rc))
            break;
    }
    Assert(!(uFileOffset % 512));
    pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
    return rc;
}
5294/**
5295 * Internal. Free all allocated space for representing an image, and optionally
5296 * delete the image from disk.
5297 */
/**
 * Internal. Free all allocated space for representing an image, and optionally
 * delete the image from disk.
 *
 * For writable streamOptimized images this also finalizes the stream: pending
 * grain tables, the grain directory, footer and end-of-stream marker are
 * appended before the files are closed.
 *
 * @returns VBox status code.
 * @param   pImage  The VMDK image instance, may be NULL (no-op then).
 * @param   fDelete Whether to delete the image files from disk.
 * @param   fFlush  Whether to flush pending data for non-stream images.
 */
static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
{
    int rc = VINF_SUCCESS;
    /* Freeing a never allocated image (e.g. because the open failed) is
     * not signalled as an error. After all nothing bad happens. */
    if (pImage)
    {
        if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
        {
            if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
            {
                /* Check if all extents are clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    Assert(!pImage->pExtents[i].fUncleanShutdown);
                }
            }
            else
            {
                /* Mark all extents as clean. */
                for (unsigned i = 0; i < pImage->cExtents; i++)
                {
                    if (   pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
                        && pImage->pExtents[i].fUncleanShutdown)
                    {
                        pImage->pExtents[i].fUncleanShutdown = false;
                        pImage->pExtents[i].fMetaDirty = true;
                    }
                    /* From now on it's not safe to append any more data. */
                    pImage->pExtents[i].uAppendPosition = 0;
                }
            }
        }
        if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* No need to write any pending data if the file will be deleted
             * or if the new file wasn't successfully created. */
            if (   !fDelete && pImage->pExtents
                && pImage->pExtents[0].cGTEntries
                && pImage->pExtents[0].uAppendPosition)
            {
                PVMDKEXTENT pExtent = &pImage->pExtents[0];
                /* Flush the grain table covering the last accessed grain,
                 * then empty tables for all remaining directory entries. */
                uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
                rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
                AssertRC(rc);
                vmdkStreamClearGT(pImage, pExtent);
                for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
                {
                    rc = vmdkStreamFlushGT(pImage, pExtent, i);
                    AssertRC(rc);
                }
                uint64_t uFileOffset = pExtent->uAppendPosition;
                if (!uFileOffset)
                    /* NOTE(review): early return here skips freeing extents,
                     * caches and descriptor data below — confirm acceptable
                     * for this internal-error path. */
                    return VERR_INTERNAL_ERROR;
                uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                /* From now on it's not safe to append any more data. */
                pExtent->uAppendPosition = 0;
                /* Grain directory marker. */
                uint8_t aMarker[512];
                PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
                memset(pMarker, '\0', sizeof(aMarker));
                /* NOTE(review): RT_H2LE_U64 is applied before RT_ALIGN_64 /
                 * VMDK_BYTE2SECTOR here; on big-endian hosts the alignment
                 * would operate on byte-swapped data — verify ordering. */
                pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
                                            aMarker, sizeof(aMarker));
                AssertRC(rc);
                uFileOffset += 512;
                /* Write grain directory in little endian style. The array will
                 * not be used after this, so convert in place. */
                uint32_t *pGDTmp = pExtent->pGD;
                for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
                    *pGDTmp = RT_H2LE_U32(*pGDTmp);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, pExtent->pGD,
                                            pExtent->cGDEntries * sizeof(uint32_t));
                AssertRC(rc);
                pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
                pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
                uFileOffset = RT_ALIGN_64(   uFileOffset
                                          + pExtent->cGDEntries * sizeof(uint32_t),
                                          512);
                /* Footer marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                /* NOTE(review): no RT_H2LE_U64 on this uSector field, unlike
                 * the markers above — harmless on little-endian hosts only. */
                pMarker->uSector = VMDK_BYTE2SECTOR(512);
                pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);
                uFileOffset += 512;
                /* The footer itself: a copy of the sparse extent header. */
                rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
                AssertRC(rc);
                uFileOffset += 512;
                /* End-of-stream marker. */
                memset(pMarker, '\0', sizeof(aMarker));
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            uFileOffset, aMarker, sizeof(aMarker));
                AssertRC(rc);
            }
        }
        else if (!fDelete && fFlush)
            vmdkFlushImage(pImage, NULL);
        /* Release per-extent data and close/delete the extent files. */
        if (pImage->pExtents != NULL)
        {
            for (unsigned i = 0 ; i < pImage->cExtents; i++)
            {
                int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
                if (RT_SUCCESS(rc))
                    rc = rc2; /* Propagate any error when closing the file. */
            }
            RTMemFree(pImage->pExtents);
            pImage->pExtents = NULL;
        }
        pImage->cExtents = 0;
        if (pImage->pFile != NULL)
        {
            int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
            if (RT_SUCCESS(rc))
                rc = rc2; /* Propagate any error when closing the file. */
        }
        int rc2 = vmdkFileCheckAllClose(pImage);
        if (RT_SUCCESS(rc))
            rc = rc2; /* Propagate any error when closing the file. */
        if (pImage->pGTCache)
        {
            RTMemFree(pImage->pGTCache);
            pImage->pGTCache = NULL;
        }
        if (pImage->pDescData)
        {
            RTMemFree(pImage->pDescData);
            pImage->pDescData = NULL;
        }
    }
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
5434/**
5435 * Internal. Flush image data (and metadata) to disk.
5436 */
5437static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5438{
5439 PVMDKEXTENT pExtent;
5440 int rc = VINF_SUCCESS;
5441 /* Update descriptor if changed. */
5442 if (pImage->Descriptor.fDirty)
5443 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5444 if (RT_SUCCESS(rc))
5445 {
5446 for (unsigned i = 0; i < pImage->cExtents; i++)
5447 {
5448 pExtent = &pImage->pExtents[i];
5449 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5450 {
5451 switch (pExtent->enmType)
5452 {
5453 case VMDKETYPE_HOSTED_SPARSE:
5454 if (!pExtent->fFooter)
5455 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5456 else
5457 {
5458 uint64_t uFileOffset = pExtent->uAppendPosition;
5459 /* Simply skip writing anything if the streamOptimized
5460 * image hasn't been just created. */
5461 if (!uFileOffset)
5462 break;
5463 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5464 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5465 uFileOffset, pIoCtx);
5466 }
5467 break;
5468 case VMDKETYPE_VMFS:
5469 case VMDKETYPE_FLAT:
5470 /* Nothing to do. */
5471 break;
5472 case VMDKETYPE_ZERO:
5473 default:
5474 AssertMsgFailed(("extent with type %d marked as dirty\n",
5475 pExtent->enmType));
5476 break;
5477 }
5478 }
5479 if (RT_FAILURE(rc))
5480 break;
5481 switch (pExtent->enmType)
5482 {
5483 case VMDKETYPE_HOSTED_SPARSE:
5484 case VMDKETYPE_VMFS:
5485 case VMDKETYPE_FLAT:
5486 /** @todo implement proper path absolute check. */
5487 if ( pExtent->pFile != NULL
5488 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5489 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5490 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5491 NULL, NULL);
5492 break;
5493 case VMDKETYPE_ZERO:
5494 /* No need to do anything for this extent. */
5495 break;
5496 default:
5497 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5498 break;
5499 }
5500 }
5501 }
5502 return rc;
5503}
5504/**
5505 * Internal. Find extent corresponding to the sector number in the disk.
5506 */
5507static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5508 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5509{
5510 PVMDKEXTENT pExtent = NULL;
5511 int rc = VINF_SUCCESS;
5512 for (unsigned i = 0; i < pImage->cExtents; i++)
5513 {
5514 if (offSector < pImage->pExtents[i].cNominalSectors)
5515 {
5516 pExtent = &pImage->pExtents[i];
5517 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5518 break;
5519 }
5520 offSector -= pImage->pExtents[i].cNominalSectors;
5521 }
5522 if (pExtent)
5523 *ppExtent = pExtent;
5524 else
5525 rc = VERR_IO_SECTOR_NOT_FOUND;
5526 return rc;
5527}
5528/**
5529 * Internal. Hash function for placing the grain table hash entries.
5530 */
5531static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5532 unsigned uExtent)
5533{
5534 /** @todo this hash function is quite simple, maybe use a better one which
5535 * scrambles the bits better. */
5536 return (uSector + uExtent) % pCache->cEntries;
5537}
5538/**
5539 * Internal. Get sector number in the extent file from the relative sector
5540 * number in the extent.
5541 */
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance.
 * @param   pIoCtx          I/O context used for metadata reads.
 * @param   pExtent         The extent to look the sector up in.
 * @param   uSector         Extent-relative sector number.
 * @param   puExtentSector  Where to store the sector in the extent file;
 *                          0 means the grain is not allocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
                         PVMDKEXTENT pExtent, uint64_t uSector,
                         uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;
    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }
    /* Locate the grain directory entry covering this sector. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }
    /* Look the grain table block up in the cache; on a miss, read the block
     * from the grain table on disk and convert it to host endianness. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        PVDMETAXFER pMetaXfer;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
        if (RT_FAILURE(rc))
            return rc;
        /* We can release the metadata transfer immediately. */
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* A zero grain table entry means the grain is unallocated; otherwise add
     * the in-grain offset to the grain's start sector. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
5602/**
5603 * Internal. Writes the grain and also if necessary the grain tables.
5604 * Uses the grain table cache as a true grain table.
5605 */
/**
 * Internal. Writes the grain and also if necessary the grain tables.
 * Uses the grain table cache as a true grain table.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image instance.
 * @param   pExtent     The (streamOptimized) extent being written.
 * @param   uSector     Extent-relative start sector of the write.
 * @param   pIoCtx      I/O context holding the data to write.
 * @param   cbWrite     Number of bytes to write.
 */
static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                uint64_t uSector, PVDIOCTX pIoCtx,
                                uint64_t cbWrite)
{
    uint32_t uGrain;
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;
    uint32_t uCacheLine, uCacheEntry;
    const void *pData;
    int rc;
    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbWrite == 0
        || (   cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
        || uSector % pExtent->cSectorsPerGrain
        || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
        return VERR_INVALID_PARAMETER;
    /* Clip write range to at most the rest of the grain. */
    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
    /* Do not allow to go back. */
    uGrain = uSector / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainAccess)
        return VERR_VD_VMDK_INVALID_WRITE;
    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself. */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
        return VINF_SUCCESS;
    /* Crossing into a new grain directory entry: flush the current grain
     * table and empty tables for any entries skipped in between. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            return rc;
        vmdkStreamClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdkStreamFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                return rc;
        }
    }
    uint64_t uFileOffset;
    uFileOffset = pExtent->uAppendPosition;
    if (!uFileOffset)
        return VERR_INTERNAL_ERROR;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);
    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
        return VERR_INTERNAL_ERROR;
    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
    /* Partial final grain: copy into the scratch buffer and zero-pad to a
     * full grain; full grain: compress straight from the I/O context. */
    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
        memset((char *)pExtent->pvGrain + cbWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
        pData = pExtent->pvGrain;
    }
    else
    {
        RTSGSEG Segment;
        unsigned cSegments = 1;
        size_t cbSeg = 0;
        cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
                                             &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
        pData = Segment.pvSeg;
    }
    /* Deflate the grain to the (aligned) append position. */
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        pExtent->uGrainSectorAbs = 0;
        AssertRC(rc);
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainAccess = uGrain;
    pExtent->uAppendPosition += cbGrain;
    return rc;
}
/**
 * Internal: Updates the grain table during grain allocation.
 *
 * Stores the sector number of the freshly allocated grain
 * (pGrainAlloc->uGrainOffset) in the grain table entry covering
 * pGrainAlloc->uSector, both in the in-memory grain table cache and in the
 * on-disk grain table (and the redundant/backup grain table when the extent
 * has one).
 *
 * @returns VBox status code.  VERR_VD_ASYNC_IO_IN_PROGRESS indicates that
 *          metadata transfers were queued; vmdkAllocGrainComplete() is called
 *          when they finish and may re-invoke this function (see
 *          fGTUpdateNeeded below).
 * @param   pImage      VMDK image instance.
 * @param   pExtent     Extent the grain belongs to.
 * @param   pIoCtx      I/O context of the triggering write request.
 * @param   pGrainAlloc Grain allocation bookkeeping: GT/RGT sectors, grain
 *                      offset and the pending transfer counter.
 */
static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
{
    int rc = VINF_SUCCESS;
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    uint32_t uGTHash, uGTBlockIndex;
    uint64_t uGTSector, uRGTSector, uGTBlock;
    uint64_t uSector = pGrainAlloc->uSector;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
                 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
    uGTSector = pGrainAlloc->uGTSector;
    uRGTSector = pGrainAlloc->uRGTSector;
    LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
    /* Update the grain table (and the cache). */
    /* uGTBlock identifies the cache-line-sized chunk of GT entries that
     * covers this grain; used both as cache key and as on-disk offset. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        LogFlow(("Cache miss, fetch data from disk\n"));
        PVDMETAXFER pMetaXfer = NULL;
        rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        {
            /* fGTUpdateNeeded makes vmdkAllocGrainComplete() call this
             * function again once the metadata read has finished. */
            pGrainAlloc->cIoXfersPending++;
            pGrainAlloc->fGTUpdateNeeded = true;
            /* Leave early, we will be called again after the read completed. */
            LogFlowFunc(("Metadata read in progress, leaving\n"));
            return rc;
        }
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
        vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
        /* Populate the cache line with the freshly read chunk (host endian). */
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    else
    {
        /* Cache hit. Convert grain table block back to disk format, otherwise
         * the code below will write garbage for all but the updated entry. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    }
    pGrainAlloc->fGTUpdateNeeded = false;
    /* Patch the new grain's sector number into both the little-endian disk
     * buffer and the host-endian cache line. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
    pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
    /* Update grain table on disk. */
    rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                vmdkAllocGrainComplete, pGrainAlloc);
    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
        pGrainAlloc->cIoXfersPending++;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    if (pExtent->pRGD)
    {
        /* Update backup grain table on disk. */
        rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
                                    vmdkAllocGrainComplete, pGrainAlloc);
        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            pGrainAlloc->cIoXfersPending++;
        else if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    }
    LogFlowFunc(("leaving rc=%Rrc\n", rc));
    return rc;
}
5784/**
5785 * Internal - complete the grain allocation by updating disk grain table if required.
5786 */
5787static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5788{
5789 RT_NOREF1(rcReq);
5790 int rc = VINF_SUCCESS;
5791 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5792 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5793 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5794 pBackendData, pIoCtx, pvUser, rcReq));
5795 pGrainAlloc->cIoXfersPending--;
5796 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5797 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
5798 if (!pGrainAlloc->cIoXfersPending)
5799 {
5800 /* Grain allocation completed. */
5801 RTMemFree(pGrainAlloc);
5802 }
5803 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5804 return rc;
5805}
5806/**
5807 * Internal. Allocates a new grain table (if necessary).
5808 */
5809static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5810 uint64_t uSector, uint64_t cbWrite)
5811{
5812 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
5813 uint64_t uGDIndex, uGTSector, uRGTSector;
5814 uint64_t uFileOffset;
5815 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5816 int rc;
5817 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5818 pCache, pExtent, pIoCtx, uSector, cbWrite));
5819 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5820 if (!pGrainAlloc)
5821 return VERR_NO_MEMORY;
5822 pGrainAlloc->pExtent = pExtent;
5823 pGrainAlloc->uSector = uSector;
5824 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5825 if (uGDIndex >= pExtent->cGDEntries)
5826 {
5827 RTMemFree(pGrainAlloc);
5828 return VERR_OUT_OF_RANGE;
5829 }
5830 uGTSector = pExtent->pGD[uGDIndex];
5831 if (pExtent->pRGD)
5832 uRGTSector = pExtent->pRGD[uGDIndex];
5833 else
5834 uRGTSector = 0; /**< avoid compiler warning */
5835 if (!uGTSector)
5836 {
5837 LogFlow(("Allocating new grain table\n"));
5838 /* There is no grain table referenced by this grain directory
5839 * entry. So there is absolutely no data in this area. Allocate
5840 * a new grain table and put the reference to it in the GDs. */
5841 uFileOffset = pExtent->uAppendPosition;
5842 if (!uFileOffset)
5843 {
5844 RTMemFree(pGrainAlloc);
5845 return VERR_INTERNAL_ERROR;
5846 }
5847 Assert(!(uFileOffset % 512));
5848 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5849 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5850 /* Normally the grain table is preallocated for hosted sparse extents
5851 * that support more than 32 bit sector numbers. So this shouldn't
5852 * ever happen on a valid extent. */
5853 if (uGTSector > UINT32_MAX)
5854 {
5855 RTMemFree(pGrainAlloc);
5856 return VERR_VD_VMDK_INVALID_HEADER;
5857 }
5858 /* Write grain table by writing the required number of grain table
5859 * cache chunks. Allocate memory dynamically here or we flood the
5860 * metadata cache with very small entries. */
5861 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
5862 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5863 if (!paGTDataTmp)
5864 {
5865 RTMemFree(pGrainAlloc);
5866 return VERR_NO_MEMORY;
5867 }
5868 memset(paGTDataTmp, '\0', cbGTDataTmp);
5869 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5870 VMDK_SECTOR2BYTE(uGTSector),
5871 paGTDataTmp, cbGTDataTmp, pIoCtx,
5872 vmdkAllocGrainComplete, pGrainAlloc);
5873 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5874 pGrainAlloc->cIoXfersPending++;
5875 else if (RT_FAILURE(rc))
5876 {
5877 RTMemTmpFree(paGTDataTmp);
5878 RTMemFree(pGrainAlloc);
5879 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5880 }
5881 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
5882 + cbGTDataTmp, 512);
5883 if (pExtent->pRGD)
5884 {
5885 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5886 uFileOffset = pExtent->uAppendPosition;
5887 if (!uFileOffset)
5888 return VERR_INTERNAL_ERROR;
5889 Assert(!(uFileOffset % 512));
5890 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5891 /* Normally the redundant grain table is preallocated for hosted
5892 * sparse extents that support more than 32 bit sector numbers. So
5893 * this shouldn't ever happen on a valid extent. */
5894 if (uRGTSector > UINT32_MAX)
5895 {
5896 RTMemTmpFree(paGTDataTmp);
5897 return VERR_VD_VMDK_INVALID_HEADER;
5898 }
5899 /* Write grain table by writing the required number of grain table
5900 * cache chunks. Allocate memory dynamically here or we flood the
5901 * metadata cache with very small entries. */
5902 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5903 VMDK_SECTOR2BYTE(uRGTSector),
5904 paGTDataTmp, cbGTDataTmp, pIoCtx,
5905 vmdkAllocGrainComplete, pGrainAlloc);
5906 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5907 pGrainAlloc->cIoXfersPending++;
5908 else if (RT_FAILURE(rc))
5909 {
5910 RTMemTmpFree(paGTDataTmp);
5911 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5912 }
5913 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
5914 }
5915 RTMemTmpFree(paGTDataTmp);
5916 /* Update the grain directory on disk (doing it before writing the
5917 * grain table will result in a garbled extent if the operation is
5918 * aborted for some reason. Otherwise the worst that can happen is
5919 * some unused sectors in the extent. */
5920 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5921 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5922 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5923 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5924 vmdkAllocGrainComplete, pGrainAlloc);
5925 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5926 pGrainAlloc->cIoXfersPending++;
5927 else if (RT_FAILURE(rc))
5928 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5929 if (pExtent->pRGD)
5930 {
5931 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5932 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5933 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5934 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5935 vmdkAllocGrainComplete, pGrainAlloc);
5936 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5937 pGrainAlloc->cIoXfersPending++;
5938 else if (RT_FAILURE(rc))
5939 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5940 }
5941 /* As the final step update the in-memory copy of the GDs. */
5942 pExtent->pGD[uGDIndex] = uGTSector;
5943 if (pExtent->pRGD)
5944 pExtent->pRGD[uGDIndex] = uRGTSector;
5945 }
5946 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5947 pGrainAlloc->uGTSector = uGTSector;
5948 pGrainAlloc->uRGTSector = uRGTSector;
5949 uFileOffset = pExtent->uAppendPosition;
5950 if (!uFileOffset)
5951 return VERR_INTERNAL_ERROR;
5952 Assert(!(uFileOffset % 512));
5953 pGrainAlloc->uGrainOffset = uFileOffset;
5954 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5955 {
5956 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
5957 ("Accesses to stream optimized images must be synchronous\n"),
5958 VERR_INVALID_STATE);
5959 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5960 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
5961 /* Invalidate cache, just in case some code incorrectly allows mixing
5962 * of reads and writes. Normally shouldn't be needed. */
5963 pExtent->uGrainSectorAbs = 0;
5964 /* Write compressed data block and the markers. */
5965 uint32_t cbGrain = 0;
5966 size_t cbSeg = 0;
5967 RTSGSEG Segment;
5968 unsigned cSegments = 1;
5969 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5970 &cSegments, cbWrite);
5971 Assert(cbSeg == cbWrite);
5972 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
5973 Segment.pvSeg, cbWrite, uSector, &cbGrain);
5974 if (RT_FAILURE(rc))
5975 {
5976 AssertRC(rc);
5977 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
5978 }
5979 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
5980 pExtent->uAppendPosition += cbGrain;
5981 }
5982 else
5983 {
5984 /* Write the data. Always a full grain, or we're in big trouble. */
5985 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
5986 uFileOffset, pIoCtx, cbWrite,
5987 vmdkAllocGrainComplete, pGrainAlloc);
5988 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5989 pGrainAlloc->cIoXfersPending++;
5990 else if (RT_FAILURE(rc))
5991 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5992 pExtent->uAppendPosition += cbWrite;
5993 }
5994 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
5995 if (!pGrainAlloc->cIoXfersPending)
5996 {
5997 /* Grain allocation completed. */
5998 RTMemFree(pGrainAlloc);
5999 }
6000 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6001 return rc;
6002}
6003/**
6004 * Internal. Reads the contents by sequentially going over the compressed
6005 * grains (hoping that they are in sequence).
6006 */
6007static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6008 uint64_t uSector, PVDIOCTX pIoCtx,
6009 uint64_t cbRead)
6010{
6011 int rc;
6012 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6013 pImage, pExtent, uSector, pIoCtx, cbRead));
6014 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6015 ("Async I/O not supported for sequential stream optimized images\n"),
6016 VERR_INVALID_STATE);
6017 /* Do not allow to go back. */
6018 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6019 if (uGrain < pExtent->uLastGrainAccess)
6020 return VERR_VD_VMDK_INVALID_STATE;
6021 pExtent->uLastGrainAccess = uGrain;
6022 /* After a previous error do not attempt to recover, as it would need
6023 * seeking (in the general case backwards which is forbidden). */
6024 if (!pExtent->uGrainSectorAbs)
6025 return VERR_VD_VMDK_INVALID_STATE;
6026 /* Check if we need to read something from the image or if what we have
6027 * in the buffer is good to fulfill the request. */
6028 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6029 {
6030 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6031 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6032 /* Get the marker from the next data block - and skip everything which
6033 * is not a compressed grain. If it's a compressed grain which is for
6034 * the requested sector (or after), read it. */
6035 VMDKMARKER Marker;
6036 do
6037 {
6038 RT_ZERO(Marker);
6039 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6040 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6041 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6042 if (RT_FAILURE(rc))
6043 return rc;
6044 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6045 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6046 if (Marker.cbSize == 0)
6047 {
6048 /* A marker for something else than a compressed grain. */
6049 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6050 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6051 + RT_UOFFSETOF(VMDKMARKER, uType),
6052 &Marker.uType, sizeof(Marker.uType));
6053 if (RT_FAILURE(rc))
6054 return rc;
6055 Marker.uType = RT_LE2H_U32(Marker.uType);
6056 switch (Marker.uType)
6057 {
6058 case VMDK_MARKER_EOS:
6059 uGrainSectorAbs++;
6060 /* Read (or mostly skip) to the end of file. Uses the
6061 * Marker (LBA sector) as it is unused anyway. This
6062 * makes sure that really everything is read in the
6063 * success case. If this read fails it means the image
6064 * is truncated, but this is harmless so ignore. */
6065 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6066 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6067 + 511,
6068 &Marker.uSector, 1);
6069 break;
6070 case VMDK_MARKER_GT:
6071 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6072 break;
6073 case VMDK_MARKER_GD:
6074 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6075 break;
6076 case VMDK_MARKER_FOOTER:
6077 uGrainSectorAbs += 2;
6078 break;
6079 case VMDK_MARKER_UNSPECIFIED:
6080 /* Skip over the contents of the unspecified marker
6081 * type 4 which exists in some vSphere created files. */
6082 /** @todo figure out what the payload means. */
6083 uGrainSectorAbs += 1;
6084 break;
6085 default:
6086 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6087 pExtent->uGrainSectorAbs = 0;
6088 return VERR_VD_VMDK_INVALID_STATE;
6089 }
6090 pExtent->cbGrainStreamRead = 0;
6091 }
6092 else
6093 {
6094 /* A compressed grain marker. If it is at/after what we're
6095 * interested in read and decompress data. */
6096 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6097 {
6098 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6099 continue;
6100 }
6101 uint64_t uLBA = 0;
6102 uint32_t cbGrainStreamRead = 0;
6103 rc = vmdkFileInflateSync(pImage, pExtent,
6104 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6105 pExtent->pvGrain,
6106 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6107 &Marker, &uLBA, &cbGrainStreamRead);
6108 if (RT_FAILURE(rc))
6109 {
6110 pExtent->uGrainSectorAbs = 0;
6111 return rc;
6112 }
6113 if ( pExtent->uGrain
6114 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6115 {
6116 pExtent->uGrainSectorAbs = 0;
6117 return VERR_VD_VMDK_INVALID_STATE;
6118 }
6119 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6120 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6121 break;
6122 }
6123 } while (Marker.uType != VMDK_MARKER_EOS);
6124 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6125 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6126 {
6127 pExtent->uGrain = UINT32_MAX;
6128 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6129 * the next read would try to get more data, and we're at EOF. */
6130 pExtent->cbGrainStreamRead = 1;
6131 }
6132 }
6133 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6134 {
6135 /* The next data block we have is not for this area, so just return
6136 * that there is no data. */
6137 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6138 return VERR_VD_BLOCK_FREE;
6139 }
6140 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6141 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6142 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6143 cbRead);
6144 LogFlowFunc(("returns VINF_SUCCESS\n"));
6145 return VINF_SUCCESS;
6146}
6147/**
6148 * Replaces a fragment of a string with the specified string.
6149 *
6150 * @returns Pointer to the allocated UTF-8 string.
6151 * @param pszWhere UTF-8 string to search in.
6152 * @param pszWhat UTF-8 string to search for.
6153 * @param pszByWhat UTF-8 string to replace the found string with.
6154 *
6155 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6156 * for updating the base name in the descriptor, the second is for
 *          generating new filenames for extents. This code borked when
 *          RTPathAbs started correcting the drive letter case on Windows:
 *          strstr failed because the name being searched for had not been
 *          subjected to RTPathAbs while pExtent->pszFullname had. I fixed
 *          this by applying RTPathAbs in the places it wasn't applied.
6162 *
6163 * However, this highlights some undocumented ASSUMPTIONS as well as
 *          terrible shortcomings of the approach.
6165 *
6166 * Given the right filename, it may also screw up the descriptor. Take
6167 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
6168 * we'll be asked to replace "Test0" with something, no problem. No,
6169 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6170 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6171 * its bum. The descriptor string must be parsed and reconstructed,
6172 * the lazy strstr approach doesn't cut it.
6173 *
6174 * I'm also curious as to what would be the correct escaping of '"' in
6175 * the file name and how that is supposed to be handled, because it
6176 * needs to be or such names must be rejected in several places (maybe
6177 * they are, I didn't check).
6178 *
6179 * When this function is used to replace the start of a path, I think
6180 * the assumption from the prep/setup code is that we kind of knows
6181 * what we're working on (I could be wrong). However, using strstr
6182 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6183 * Especially on unix systems, weird stuff could happen if someone
6184 * unwittingly tinkers with the prep/setup code. What should really be
6185 * done here is using a new RTPathStartEx function that (via flags)
6186 * allows matching partial final component and returns the length of
6187 * what it matched up (in case it skipped slashes and '.' components).
6188 *
6189 */
6190static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6191 const char *pszByWhat)
6192{
6193 AssertPtr(pszWhere);
6194 AssertPtr(pszWhat);
6195 AssertPtr(pszByWhat);
6196 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6197 if (!pszFoundStr)
6198 {
6199 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6200 return NULL;
6201 }
6202 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6203 char *pszNewStr = RTStrAlloc(cbFinal);
6204 if (pszNewStr)
6205 {
6206 char *pszTmp = pszNewStr;
6207 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6208 pszTmp += pszFoundStr - pszWhere;
6209 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6210 pszTmp += strlen(pszByWhat);
6211 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6212 }
6213 return pszNewStr;
6214}
6215/** @copydoc VDIMAGEBACKEND::pfnProbe */
6216static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6217 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6218{
6219 RT_NOREF(enmDesiredType);
6220 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6221 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6222 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6223 int rc = VINF_SUCCESS;
6224 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6225 if (RT_LIKELY(pImage))
6226 {
6227 pImage->pszFilename = pszFilename;
6228 pImage->pFile = NULL;
6229 pImage->pExtents = NULL;
6230 pImage->pFiles = NULL;
6231 pImage->pGTCache = NULL;
6232 pImage->pDescData = NULL;
6233 pImage->pVDIfsDisk = pVDIfsDisk;
6234 pImage->pVDIfsImage = pVDIfsImage;
6235 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6236 * much as possible in vmdkOpenImage. */
6237 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6238 vmdkFreeImage(pImage, false, false /*fFlush*/);
6239 RTMemFree(pImage);
6240 if (RT_SUCCESS(rc))
6241 *penmType = VDTYPE_HDD;
6242 }
6243 else
6244 rc = VERR_NO_MEMORY;
6245 LogFlowFunc(("returns %Rrc\n", rc));
6246 return rc;
6247}
6248/** @copydoc VDIMAGEBACKEND::pfnOpen */
6249static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6250 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6251 VDTYPE enmType, void **ppBackendData)
6252{
6253 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6254 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6255 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6256 int rc;
6257 /* Check open flags. All valid flags are supported. */
6258 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6259 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
6260 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6261 if (RT_LIKELY(pImage))
6262 {
6263 pImage->pszFilename = pszFilename;
6264 pImage->pFile = NULL;
6265 pImage->pExtents = NULL;
6266 pImage->pFiles = NULL;
6267 pImage->pGTCache = NULL;
6268 pImage->pDescData = NULL;
6269 pImage->pVDIfsDisk = pVDIfsDisk;
6270 pImage->pVDIfsImage = pVDIfsImage;
6271 rc = vmdkOpenImage(pImage, uOpenFlags);
6272 if (RT_SUCCESS(rc))
6273 *ppBackendData = pImage;
6274 else
6275 RTMemFree(pImage);
6276 }
6277 else
6278 rc = VERR_NO_MEMORY;
6279 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6280 return rc;
6281}
6282/** @copydoc VDIMAGEBACKEND::pfnCreate */
6283static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
6284 unsigned uImageFlags, const char *pszComment,
6285 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6286 PCRTUUID pUuid, unsigned uOpenFlags,
6287 unsigned uPercentStart, unsigned uPercentSpan,
6288 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6289 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
6290 void **ppBackendData)
6291{
6292 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
6293 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
6294 int rc;
6295 /* Check the VD container type and image flags. */
6296 if ( enmType != VDTYPE_HDD
6297 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
6298 return VERR_VD_INVALID_TYPE;
6299 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
6300 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
6301 && ( !cbSize
6302 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
6303 return VERR_VD_INVALID_SIZE;
6304 /* Check image flags for invalid combinations. */
6305 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6306 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
6307 return VERR_INVALID_PARAMETER;
6308 /* Check open flags. All valid flags are supported. */
6309 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6310 AssertReturn( VALID_PTR(pszFilename)
6311 && *pszFilename
6312 && VALID_PTR(pPCHSGeometry)
6313 && VALID_PTR(pLCHSGeometry)
6314 && !( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6315 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
6316 VERR_INVALID_PARAMETER);
6317 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6318 if (RT_LIKELY(pImage))
6319 {
6320 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
6321 pImage->pszFilename = pszFilename;
6322 pImage->pFile = NULL;
6323 pImage->pExtents = NULL;
6324 pImage->pFiles = NULL;
6325 pImage->pGTCache = NULL;
6326 pImage->pDescData = NULL;
6327 pImage->pVDIfsDisk = pVDIfsDisk;
6328 pImage->pVDIfsImage = pVDIfsImage;
6329 /* Descriptors for split images can be pretty large, especially if the
6330 * filename is long. So prepare for the worst, and allocate quite some
6331 * memory for the descriptor in this case. */
6332 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6333 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6334 else
6335 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6336 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6337 if (RT_LIKELY(pImage->pDescData))
6338 {
6339 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6340 pPCHSGeometry, pLCHSGeometry, pUuid,
6341 pIfProgress, uPercentStart, uPercentSpan);
6342 if (RT_SUCCESS(rc))
6343 {
6344 /* So far the image is opened in read/write mode. Make sure the
6345 * image is opened in read-only mode if the caller requested that. */
6346 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6347 {
6348 vmdkFreeImage(pImage, false, true /*fFlush*/);
6349 rc = vmdkOpenImage(pImage, uOpenFlags);
6350 }
6351 if (RT_SUCCESS(rc))
6352 *ppBackendData = pImage;
6353 }
6354 if (RT_FAILURE(rc))
6355 RTMemFree(pImage->pDescData);
6356 }
6357 else
6358 rc = VERR_NO_MEMORY;
6359 if (RT_FAILURE(rc))
6360 RTMemFree(pImage);
6361 }
6362 else
6363 rc = VERR_NO_MEMORY;
6364 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6365 return rc;
6366}
/**
 * Prepares the state for renaming a VMDK image, setting up the state and allocating
 * memory.
 *
 * Takes a full copy of the descriptor and records old/new base and full names
 * so the rename can be rolled back later.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The state to initialize.
 * @param   pszFilename     The new filename.
 *
 * @note    NOTE(review): the AssertReturn paths below return without freeing
 *          earlier allocations; all of them are stored in @a pRenameState, so
 *          this presumably relies on the caller always invoking the rename
 *          state destroy routine on failure — confirm against the caller.
 */
static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
{
    /* The new name must have a filename component (must not end in a separator). */
    AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
    int rc = VINF_SUCCESS;
    memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    pRenameState->cExtents     = pImage->cExtents;
    pRenameState->apszOldName  = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewName  = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
    pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
    if (   pRenameState->apszOldName
        && pRenameState->apszNewName
        && pRenameState->apszNewLines)
    {
        /* Save the descriptor size and position. */
        if (pImage->pDescData)
        {
            /* Separate descriptor file. */
            pRenameState->fEmbeddedDesc = false;
        }
        else
        {
            /* Embedded descriptor file. */
            pRenameState->ExtentCopy = pImage->pExtents[0];
            pRenameState->fEmbeddedDesc = true;
        }
        /* Save the descriptor content. */
        pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
        for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
        {
            pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
            if (!pRenameState->DescriptorCopy.aLines[i])
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }
        if (RT_SUCCESS(rc))
        {
            /* Prepare both old and new base names used for string replacement. */
            pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
            AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewBaseName);
            pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
            AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldBaseName);
            /* Prepare both old and new full names used for string replacement.
               Note! Must abspath the stuff here, so the strstr weirdness later in
               the renaming process get a match against abspath'ed extent paths.
               See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
            pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
            AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszNewFullName);
            pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
            RTPathStripSuffix(pRenameState->pszOldFullName);
            /* Save the old name for easy access to the old descriptor file. */
            pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
            AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
            /* Save old image name. */
            pRenameState->pszOldImageName = pImage->pszFilename;
        }
    }
    else
        rc = VERR_NO_TMP_MEMORY;
    return rc;
}
6447/**
6448 * Destroys the given rename state, freeing all allocated memory.
6449 *
6450 * @returns nothing.
6451 * @param pRenameState The rename state to destroy.
6452 */
6453static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6454{
6455 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6456 if (pRenameState->DescriptorCopy.aLines[i])
6457 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6458 if (pRenameState->apszOldName)
6459 {
6460 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6461 if (pRenameState->apszOldName[i])
6462 RTStrFree(pRenameState->apszOldName[i]);
6463 RTMemTmpFree(pRenameState->apszOldName);
6464 }
6465 if (pRenameState->apszNewName)
6466 {
6467 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6468 if (pRenameState->apszNewName[i])
6469 RTStrFree(pRenameState->apszNewName[i]);
6470 RTMemTmpFree(pRenameState->apszNewName);
6471 }
6472 if (pRenameState->apszNewLines)
6473 {
6474 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6475 if (pRenameState->apszNewLines[i])
6476 RTStrFree(pRenameState->apszNewLines[i]);
6477 RTMemTmpFree(pRenameState->apszNewLines);
6478 }
6479 if (pRenameState->pszOldDescName)
6480 RTStrFree(pRenameState->pszOldDescName);
6481 if (pRenameState->pszOldBaseName)
6482 RTStrFree(pRenameState->pszOldBaseName);
6483 if (pRenameState->pszNewBaseName)
6484 RTStrFree(pRenameState->pszNewBaseName);
6485 if (pRenameState->pszOldFullName)
6486 RTStrFree(pRenameState->pszOldFullName);
6487 if (pRenameState->pszNewFullName)
6488 RTStrFree(pRenameState->pszNewFullName);
6489}
/**
 * Rolls back the rename operation to the original state.
 *
 * Best effort: individual failures are asserted on, but the rollback keeps
 * going so that as much as possible of the original image is restored.
 *
 * @returns VBox status code.
 * @param   pImage          VMDK image instance.
 * @param   pRenameState    The rename state.
 */
static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
{
    int rc = VINF_SUCCESS;
    if (!pRenameState->fImageFreed)
    {
        /*
         * Some extents may have been closed, close the rest. We will
         * re-open the whole thing later.
         */
        vmdkFreeImage(pImage, false, true /*fFlush*/);
    }
    /* Rename files back.  apszOldName[i] is only non-NULL for files that were
     * actually moved by vmdkRenameWorker(), so untouched slots are skipped. */
    for (unsigned i = 0; i <= pRenameState->cExtents; i++)
    {
        if (pRenameState->apszOldName[i])
        {
            rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
            AssertRC(rc);
        }
    }
    /* Restore the old descriptor. */
    PVMDKFILE pFile;
    rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
                      VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                 false /* fCreate */));
    AssertRC(rc);
    if (pRenameState->fEmbeddedDesc)
    {
        /* Temporarily point the extent array at the saved extent copy so the
         * descriptor write below targets the right (re-opened) file. */
        pRenameState->ExtentCopy.pFile = pFile;
        pImage->pExtents = &pRenameState->ExtentCopy;
    }
    else
    {
        /* Shouldn't be null for separate descriptor.
         * There will be no access to the actual content.
         */
        pImage->pDescData = pRenameState->pszOldDescName;
        pImage->pFile = pFile;
    }
    pImage->Descriptor = pRenameState->DescriptorCopy;
    vmdkWriteDescriptor(pImage, NULL);
    vmdkFileClose(pImage, &pFile, false);
    /* Get rid of the stuff we implanted.  Note that pDescData pointed at
     * pszOldDescName, which is owned and later freed by the rename state. */
    pImage->pExtents = NULL;
    pImage->pFile = NULL;
    pImage->pDescData = NULL;
    /* Re-open the image back. */
    pImage->pszFilename = pRenameState->pszOldImageName;
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
    return rc;
}
6548/**
6549 * Rename worker doing the real work.
6550 *
6551 * @returns VBox status code.
6552 * @param pImage VMDK image instance.
6553 * @param pRenameState The rename state.
6554 * @param pszFilename The new filename.
6555 */
6556static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6557{
6558 int rc = VINF_SUCCESS;
6559 unsigned i, line;
6560 /* Update the descriptor with modified extent names. */
6561 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6562 i < pRenameState->cExtents;
6563 i++, line = pImage->Descriptor.aNextLines[line])
6564 {
6565 /* Update the descriptor. */
6566 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6567 pRenameState->pszOldBaseName,
6568 pRenameState->pszNewBaseName);
6569 if (!pRenameState->apszNewLines[i])
6570 {
6571 rc = VERR_NO_MEMORY;
6572 break;
6573 }
6574 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6575 }
6576 if (RT_SUCCESS(rc))
6577 {
6578 /* Make sure the descriptor gets written back. */
6579 pImage->Descriptor.fDirty = true;
6580 /* Flush the descriptor now, in case it is embedded. */
6581 vmdkFlushImage(pImage, NULL);
6582 /* Close and rename/move extents. */
6583 for (i = 0; i < pRenameState->cExtents; i++)
6584 {
6585 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6586 /* Compose new name for the extent. */
6587 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6588 pRenameState->pszOldFullName,
6589 pRenameState->pszNewFullName);
6590 if (!pRenameState->apszNewName[i])
6591 {
6592 rc = VERR_NO_MEMORY;
6593 break;
6594 }
6595 /* Close the extent file. */
6596 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6597 if (RT_FAILURE(rc))
6598 break;;
6599 /* Rename the extent file. */
6600 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6601 if (RT_FAILURE(rc))
6602 break;
6603 /* Remember the old name. */
6604 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6605 }
6606 if (RT_SUCCESS(rc))
6607 {
6608 /* Release all old stuff. */
6609 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6610 if (RT_SUCCESS(rc))
6611 {
6612 pRenameState->fImageFreed = true;
6613 /* Last elements of new/old name arrays are intended for
6614 * storing descriptor's names.
6615 */
6616 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6617 /* Rename the descriptor file if it's separate. */
6618 if (!pRenameState->fEmbeddedDesc)
6619 {
6620 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6621 if (RT_SUCCESS(rc))
6622 {
6623 /* Save old name only if we may need to change it back. */
6624 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6625 }
6626 }
6627 /* Update pImage with the new information. */
6628 pImage->pszFilename = pszFilename;
6629 /* Open the new image. */
6630 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6631 }
6632 }
6633 }
6634 return rc;
6635}
6636/** @copydoc VDIMAGEBACKEND::pfnRename */
6637static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6638{
6639 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6640 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6641 VMDKRENAMESTATE RenameState;
6642 memset(&RenameState, 0, sizeof(RenameState));
6643 /* Check arguments. */
6644 AssertReturn(( pImage
6645 && VALID_PTR(pszFilename)
6646 && *pszFilename
6647 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)), VERR_INVALID_PARAMETER);
6648 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6649 if (RT_SUCCESS(rc))
6650 {
6651 /* --- Up to this point we have not done any damage yet. --- */
6652 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6653 /* Roll back all changes in case of failure. */
6654 if (RT_FAILURE(rc))
6655 {
6656 int rrc = vmdkRenameRollback(pImage, &RenameState);
6657 AssertRC(rrc);
6658 }
6659 }
6660 vmdkRenameStateDestroy(&RenameState);
6661 LogFlowFunc(("returns %Rrc\n", rc));
6662 return rc;
6663}
6664/** @copydoc VDIMAGEBACKEND::pfnClose */
6665static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6666{
6667 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6668 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6669 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6670 RTMemFree(pImage);
6671 LogFlowFunc(("returns %Rrc\n", rc));
6672 return rc;
6673}
/** @copydoc VDIMAGEBACKEND::pfnRead */
static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
                                  PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    AssertPtr(pImage);
    /* The VD layer only issues sector-aligned requests. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
    AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
    /* Find the extent and check access permissions as defined in the extent descriptor. */
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
    if (   RT_SUCCESS(rc)
        && pExtent->enmAccess != VMDKACCESS_NOACCESS)
    {
        /* Clip read range to remain in this extent. */
        cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
        /* Handle the read according to the current extent type. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
            {
                /* Translate the extent-relative sector into the absolute grain
                 * location in the data file; 0 means the grain is unallocated. */
                uint64_t uSectorExtentAbs;
                rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                if (RT_FAILURE(rc))
                    break;
                /* Clip read range to at most the rest of the grain. */
                cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
                Assert(!(cbToRead % 512));
                if (uSectorExtentAbs == 0)
                {
                    /* Unallocated grain.  Only a read-only, sequentially accessed
                     * stream optimized image may read the grains in stream order;
                     * everything else gets the block reported as free. */
                    if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                        || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                        rc = VERR_VD_BLOCK_FREE;
                    else
                        rc = vmdkStreamReadSequential(pImage, pExtent,
                                                      uSectorExtentRel,
                                                      pIoCtx, cbToRead);
                }
                else
                {
                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    {
                        AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
                                  ("Async I/O is not supported for stream optimized VMDK's\n"));
                        /* Compressed grain: inflate the whole grain into the
                         * per-extent buffer (cached, keyed by uGrainSectorAbs)
                         * and copy out the requested slice. */
                        uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                        uSectorExtentAbs -= uSectorInGrain;
                        if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                        {
                            uint64_t uLBA = 0; /* gcc maybe uninitialized */
                            rc = vmdkFileInflateSync(pImage, pExtent,
                                                     VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                     pExtent->pvGrain,
                                                     VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                     NULL, &uLBA, NULL);
                            if (RT_FAILURE(rc))
                            {
                                /* Invalidate the grain cache on failure. */
                                pExtent->uGrainSectorAbs = 0;
                                break;
                            }
                            pExtent->uGrainSectorAbs = uSectorExtentAbs;
                            pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                            Assert(uLBA == uSectorExtentRel);
                        }
                        vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
                                             (uint8_t *)pExtent->pvGrain
                                             + VMDK_SECTOR2BYTE(uSectorInGrain),
                                             cbToRead);
                    }
                    else
                        /* Uncompressed sparse extent: read straight from the file. */
                        rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                   VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                   pIoCtx, cbToRead);
                }
                break;
            }
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /* Flat extents map 1:1 onto the file; read directly. */
                rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                           VMDK_SECTOR2BYTE(uSectorExtentRel),
                                           pIoCtx, cbToRead);
                break;
            case VMDKETYPE_ZERO:
            {
                /* Zero extents have no backing storage; just fill with zeros. */
                size_t cbSet;
                cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
                Assert(cbSet == cbToRead);
                break;
            }
        }
        if (pcbActuallyRead)
            *pcbActuallyRead = cbToRead;
    }
    else if (RT_SUCCESS(rc))
        /* Extent was found but is marked NOACCESS in the descriptor. */
        rc = VERR_VD_VMDK_INVALID_STATE;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
/** @copydoc VDIMAGEBACKEND::pfnWrite */
static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                                   PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
                                   size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;
    AssertPtr(pImage);
    /* The VD layer only issues sector-aligned requests. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);
    AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);
    if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    {
        PVMDKEXTENT pExtent;
        uint64_t uSectorExtentRel;
        uint64_t uSectorExtentAbs;
        /* No size check here, will do that later when the extent is located.
         * There are sparse images out there which according to the spec are
         * invalid, because the total size is not a multiple of the grain size.
         * Also for sparse images which are stitched together in odd ways (not at
         * grain boundaries, and with the nominal size not being a multiple of the
         * grain size), this would prevent writing to the last grain. */
        rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                            &pExtent, &uSectorExtentRel);
        if (RT_SUCCESS(rc))
        {
            /* NOTE(review): this access check looks suspicious -- a non-READWRITE
             * extent is only rejected when the image is NOT stream optimized, has
             * no append position in extent 0 and the extent is not READONLY either.
             * Presumably the intent is to allow appends to stream optimized images;
             * confirm against the change history before altering it. */
            if (   pExtent->enmAccess != VMDKACCESS_READWRITE
                && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    && !pImage->pExtents[0].uAppendPosition
                    && pExtent->enmAccess != VMDKACCESS_READONLY))
                rc = VERR_VD_VMDK_INVALID_STATE;
            else
            {
                /* Handle the write according to the current extent type. */
                switch (pExtent->enmType)
                {
                    case VMDKETYPE_HOSTED_SPARSE:
                        /* Look up where (if at all) the grain is allocated. */
                        rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
                        if (RT_SUCCESS(rc))
                        {
                            /* Stream optimized images only allow strictly forward
                             * writes; anything behind the last grain is invalid. */
                            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
                                rc = VERR_VD_VMDK_INVALID_WRITE;
                            else
                            {
                                /* Clip write range to at most the rest of the grain. */
                                cbToWrite = RT_MIN(cbToWrite,
                                                   VMDK_SECTOR2BYTE(  pExtent->cSectorsPerGrain
                                                                    - uSectorExtentRel % pExtent->cSectorsPerGrain));
                                if (uSectorExtentAbs == 0)
                                {
                                    /* Grain not allocated yet. */
                                    if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                                    {
                                        if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                                        {
                                            /* Full block write to a previously unallocated block.
                                             * Check if the caller wants to avoid the automatic alloc. */
                                            if (!(fWrite & VD_WRITE_NO_ALLOC))
                                            {
                                                /* Allocate GT and find out where to store the grain. */
                                                rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
                                                                    uSectorExtentRel, cbToWrite);
                                            }
                                            else
                                                rc = VERR_VD_BLOCK_FREE;
                                            *pcbPreRead = 0;
                                            *pcbPostRead = 0;
                                        }
                                        else
                                        {
                                            /* Partial grain write: report the pre/post read
                                             * amounts so the VD layer can do read-modify-write. */
                                            /* Clip write range to remain in this extent. */
                                            cbToWrite = RT_MIN(cbToWrite,
                                                               VMDK_SECTOR2BYTE(  pExtent->uSectorOffset
                                                                                + pExtent->cNominalSectors - uSectorExtentRel));
                                            *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                                            *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                                            rc = VERR_VD_BLOCK_FREE;
                                        }
                                    }
                                    else
                                        /* Stream optimized: append a new compressed grain. */
                                        rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
                                                                  pIoCtx, cbToWrite);
                                }
                                else
                                {
                                    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                                    {
                                        /* A partial write to a streamOptimized image is simply
                                         * invalid. It requires rewriting already compressed data
                                         * which is somewhere between expensive and impossible. */
                                        rc = VERR_VD_VMDK_INVALID_STATE;
                                        pExtent->uGrainSectorAbs = 0;
                                        AssertRC(rc);
                                    }
                                    else
                                    {
                                        /* Allocated grain of a plain sparse extent:
                                         * write directly to the data file. */
                                        Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                                    VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                                    pIoCtx, cbToWrite, NULL, NULL);
                                    }
                                }
                            }
                        }
                        break;
                    case VMDKETYPE_VMFS:
                    case VMDKETYPE_FLAT:
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
                                                    VMDK_SECTOR2BYTE(uSectorExtentRel),
                                                    pIoCtx, cbToWrite, NULL, NULL);
                        break;
                    case VMDKETYPE_ZERO:
                        /* Writes to zero extents are silently dropped. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        break;
                }
            }
            if (pcbWriteProcess)
                *pcbWriteProcess = cbToWrite;
        }
    }
    else
        rc = VERR_VD_IMAGE_READ_ONLY;
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6908/** @copydoc VDIMAGEBACKEND::pfnFlush */
6909static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
6910{
6911 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6912 return vmdkFlushImage(pImage, pIoCtx);
6913}
6914/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
6915static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
6916{
6917 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6918 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6919 AssertPtrReturn(pImage, 0);
6920 return VMDK_IMAGE_VERSION;
6921}
6922/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
6923static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
6924{
6925 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6926 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6927 uint64_t cb = 0;
6928 AssertPtrReturn(pImage, 0);
6929 if (pImage->pFile != NULL)
6930 {
6931 uint64_t cbFile;
6932 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
6933 if (RT_SUCCESS(rc))
6934 cb += cbFile;
6935 }
6936 for (unsigned i = 0; i < pImage->cExtents; i++)
6937 {
6938 if (pImage->pExtents[i].pFile != NULL)
6939 {
6940 uint64_t cbFile;
6941 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
6942 if (RT_SUCCESS(rc))
6943 cb += cbFile;
6944 }
6945 }
6946 LogFlowFunc(("returns %lld\n", cb));
6947 return cb;
6948}
6949/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
6950static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
6951{
6952 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
6953 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6954 int rc = VINF_SUCCESS;
6955 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6956 if (pImage->PCHSGeometry.cCylinders)
6957 *pPCHSGeometry = pImage->PCHSGeometry;
6958 else
6959 rc = VERR_VD_GEOMETRY_NOT_SET;
6960 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6961 return rc;
6962}
6963/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
6964static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6965{
6966 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
6967 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6968 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6969 int rc = VINF_SUCCESS;
6970 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6971 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6972 {
6973 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6974 {
6975 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6976 if (RT_SUCCESS(rc))
6977 pImage->PCHSGeometry = *pPCHSGeometry;
6978 }
6979 else
6980 rc = VERR_NOT_SUPPORTED;
6981 }
6982 else
6983 rc = VERR_VD_IMAGE_READ_ONLY;
6984 LogFlowFunc(("returns %Rrc\n", rc));
6985 return rc;
6986}
6987/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
6988static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
6989{
6990 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6991 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6992 int rc = VINF_SUCCESS;
6993 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6994 if (pImage->LCHSGeometry.cCylinders)
6995 *pLCHSGeometry = pImage->LCHSGeometry;
6996 else
6997 rc = VERR_VD_GEOMETRY_NOT_SET;
6998 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6999 return rc;
7000}
7001/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7002static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7003{
7004 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7005 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7006 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7007 int rc = VINF_SUCCESS;
7008 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7009 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7010 {
7011 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7012 {
7013 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7014 if (RT_SUCCESS(rc))
7015 pImage->LCHSGeometry = *pLCHSGeometry;
7016 }
7017 else
7018 rc = VERR_NOT_SUPPORTED;
7019 }
7020 else
7021 rc = VERR_VD_IMAGE_READ_ONLY;
7022 LogFlowFunc(("returns %Rrc\n", rc));
7023 return rc;
7024}
7025/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7026static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7027{
7028 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7029 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7030 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
7031 *ppRegionList = &pThis->RegionList;
7032 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7033 return VINF_SUCCESS;
7034}
7035/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7036static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7037{
7038 RT_NOREF1(pRegionList);
7039 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7040 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7041 AssertPtr(pThis); RT_NOREF(pThis);
7042 /* Nothing to do here. */
7043}
7044/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7045static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7046{
7047 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7048 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7049 AssertPtrReturn(pImage, 0);
7050 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7051 return pImage->uImageFlags;
7052}
7053/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7054static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7055{
7056 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7057 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7058 AssertPtrReturn(pImage, 0);
7059 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7060 return pImage->uOpenFlags;
7061}
7062/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7063static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7064{
7065 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7066 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7067 int rc;
7068 /* Image must be opened and the new flags must be valid. */
7069 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7070 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7071 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7072 rc = VERR_INVALID_PARAMETER;
7073 else
7074 {
7075 /* StreamOptimized images need special treatment: reopen is prohibited. */
7076 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7077 {
7078 if (pImage->uOpenFlags == uOpenFlags)
7079 rc = VINF_SUCCESS;
7080 else
7081 rc = VERR_INVALID_PARAMETER;
7082 }
7083 else
7084 {
7085 /* Implement this operation via reopening the image. */
7086 vmdkFreeImage(pImage, false, true /*fFlush*/);
7087 rc = vmdkOpenImage(pImage, uOpenFlags);
7088 }
7089 }
7090 LogFlowFunc(("returns %Rrc\n", rc));
7091 return rc;
7092}
7093/** @copydoc VDIMAGEBACKEND::pfnGetComment */
7094static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
7095{
7096 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7097 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7098 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7099 char *pszCommentEncoded = NULL;
7100 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7101 "ddb.comment", &pszCommentEncoded);
7102 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7103 {
7104 pszCommentEncoded = NULL;
7105 rc = VINF_SUCCESS;
7106 }
7107 if (RT_SUCCESS(rc))
7108 {
7109 if (pszComment && pszCommentEncoded)
7110 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7111 else if (pszComment)
7112 *pszComment = '\0';
7113 if (pszCommentEncoded)
7114 RTMemTmpFree(pszCommentEncoded);
7115 }
7116 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7117 return rc;
7118}
7119/** @copydoc VDIMAGEBACKEND::pfnSetComment */
7120static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
7121{
7122 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7123 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7124 int rc;
7125 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7126 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7127 {
7128 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7129 rc = vmdkSetImageComment(pImage, pszComment);
7130 else
7131 rc = VERR_NOT_SUPPORTED;
7132 }
7133 else
7134 rc = VERR_VD_IMAGE_READ_ONLY;
7135 LogFlowFunc(("returns %Rrc\n", rc));
7136 return rc;
7137}
7138/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
7139static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7140{
7141 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7142 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7143 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7144 *pUuid = pImage->ImageUuid;
7145 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7146 return VINF_SUCCESS;
7147}
7148/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
7149static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7150{
7151 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7152 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7153 int rc = VINF_SUCCESS;
7154 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7155 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7156 {
7157 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7158 {
7159 pImage->ImageUuid = *pUuid;
7160 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7161 VMDK_DDB_IMAGE_UUID, pUuid);
7162 if (RT_FAILURE(rc))
7163 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7164 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7165 }
7166 else
7167 rc = VERR_NOT_SUPPORTED;
7168 }
7169 else
7170 rc = VERR_VD_IMAGE_READ_ONLY;
7171 LogFlowFunc(("returns %Rrc\n", rc));
7172 return rc;
7173}
7174/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
7175static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7176{
7177 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7178 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7179 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7180 *pUuid = pImage->ModificationUuid;
7181 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7182 return VINF_SUCCESS;
7183}
7184/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
7185static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7186{
7187 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7188 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7189 int rc = VINF_SUCCESS;
7190 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7191 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7192 {
7193 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7194 {
7195 /* Only touch the modification uuid if it changed. */
7196 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7197 {
7198 pImage->ModificationUuid = *pUuid;
7199 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7200 VMDK_DDB_MODIFICATION_UUID, pUuid);
7201 if (RT_FAILURE(rc))
7202 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7203 }
7204 }
7205 else
7206 rc = VERR_NOT_SUPPORTED;
7207 }
7208 else
7209 rc = VERR_VD_IMAGE_READ_ONLY;
7210 LogFlowFunc(("returns %Rrc\n", rc));
7211 return rc;
7212}
7213/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
7214static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7215{
7216 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7217 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7218 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7219 *pUuid = pImage->ParentUuid;
7220 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7221 return VINF_SUCCESS;
7222}
7223/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
7224static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7225{
7226 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7227 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7228 int rc = VINF_SUCCESS;
7229 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7230 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7231 {
7232 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7233 {
7234 pImage->ParentUuid = *pUuid;
7235 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7236 VMDK_DDB_PARENT_UUID, pUuid);
7237 if (RT_FAILURE(rc))
7238 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7239 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7240 }
7241 else
7242 rc = VERR_NOT_SUPPORTED;
7243 }
7244 else
7245 rc = VERR_VD_IMAGE_READ_ONLY;
7246 LogFlowFunc(("returns %Rrc\n", rc));
7247 return rc;
7248}
7249/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
7250static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7251{
7252 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7253 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7254 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7255 *pUuid = pImage->ParentModificationUuid;
7256 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7257 return VINF_SUCCESS;
7258}
7259/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
7260static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7261{
7262 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7263 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7264 int rc = VINF_SUCCESS;
7265 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7266 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7267 {
7268 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7269 {
7270 pImage->ParentModificationUuid = *pUuid;
7271 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7272 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7273 if (RT_FAILURE(rc))
7274 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7275 }
7276 else
7277 rc = VERR_NOT_SUPPORTED;
7278 }
7279 else
7280 rc = VERR_VD_IMAGE_READ_ONLY;
7281 LogFlowFunc(("returns %Rrc\n", rc));
7282 return rc;
7283}
7284/** @copydoc VDIMAGEBACKEND::pfnDump */
7285static DECLCALLBACK(void) vmdkDump(void *pBackendData)
7286{
7287 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7288 AssertPtrReturnVoid(pImage);
7289 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7290 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7291 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7292 VMDK_BYTE2SECTOR(pImage->cbSize));
7293 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7294 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7295 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7296 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7297}
/**
 * VMDK image backend descriptor registered with the VD core.
 *
 * NOTE: this is a positional aggregate initializer — the entry order must
 * match the member order of VDIMAGEBACKEND exactly; the per-entry comments
 * name the member each value initializes.  Optional callbacks that VMDK does
 * not implement (discard, compact, resize, repair, ...) are left NULL.
 */
const VDIMAGEBACKEND g_VmdkBackend =
{
    /* u32Version */
    VD_IMGBACKEND_VERSION,
    /* pszBackendName */
    "VMDK",
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS | VD_CAP_PREFERRED,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    s_aVmdkConfigInfo,
    /* pfnProbe */
    vmdkProbe,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnDiscard */
    NULL,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnQueryRegions */
    vmdkQueryRegions,
    /* pfnRegionListRelease */
    vmdkRegionListRelease,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimestamp */
    NULL,
    /* pfnGetParentTimestamp */
    NULL,
    /* pfnSetParentTimestamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnRepair */
    NULL,
    /* pfnTraverseMetadata */
    NULL,
    /* u32VersionEnd */
    VD_IMGBACKEND_VERSION
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette