VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp @ 97086

Last change on this file since 97086 was 96842, checked in by vboxsync, 2 years ago

Storage: VMDK.cpp: scm fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 352.5 KB
1/* $Id: VMDK.cpp 96842 2022-09-23 10:08:09Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5/*
6 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * SPDX-License-Identifier: GPL-3.0-only
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_VD_VMDK
32#include <VBox/log.h> /* before VBox/vd-ifs.h */
33#include <VBox/vd-plugin.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/alloc.h>
37#include <iprt/base64.h>
38#include <iprt/ctype.h>
39#include <iprt/crc.h>
40#include <iprt/dvm.h>
41#include <iprt/uuid.h>
42#include <iprt/path.h>
43#include <iprt/rand.h>
44#include <iprt/string.h>
45#include <iprt/sort.h>
46#include <iprt/zip.h>
47#include <iprt/asm.h>
48#ifdef RT_OS_WINDOWS
49# include <iprt/utf16.h>
50# include <iprt/uni.h>
51# include <iprt/uni.h>
52# include <iprt/nt/nt-and-windows.h>
53# include <winioctl.h>
54#endif
55#ifdef RT_OS_LINUX
56# include <errno.h>
57# include <sys/stat.h>
58# include <iprt/dir.h>
59# include <iprt/symlink.h>
60# include <iprt/linux/sysfs.h>
61#endif
62#ifdef RT_OS_FREEBSD
63#include <libgeom.h>
64#include <sys/stat.h>
65#include <stdlib.h>
66#endif
67#ifdef RT_OS_SOLARIS
68#include <sys/dkio.h>
69#include <sys/vtoc.h>
70#include <sys/efi_partition.h>
71#include <unistd.h>
72#include <errno.h>
73#endif
74#ifdef RT_OS_DARWIN
75# include <sys/stat.h>
76# include <sys/disk.h>
77# include <errno.h>
78/* The following structure and IOCTLs are defined in xnu bsd/sys/disk.h but
79 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
80 While we could try to include the header from the Kernel.framework, it's a lot
81 easier to just add the structure and 4 defines here. */
82typedef struct
83{
84 uint64_t offset;
85 uint64_t length;
86 uint8_t reserved0128[12];
87 dev_t dev;
88} dk_physical_extent_t;
89# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
90# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
91# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
92# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
93#endif /* RT_OS_DARWIN */
94#include "VDBackends.h"
95
96
97/*********************************************************************************************************************************
98* Constants And Macros, Structures and Typedefs *
99*********************************************************************************************************************************/
100/** Maximum encoded string size (including NUL) we allow for VMDK images.
101 * Deliberately not set high to avoid running out of descriptor space. */
102#define VMDK_ENCODED_COMMENT_MAX 1024
103/** VMDK descriptor DDB entry for PCHS cylinders. */
104#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
105/** VMDK descriptor DDB entry for PCHS heads. */
106#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
107/** VMDK descriptor DDB entry for PCHS sectors. */
108#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
109/** VMDK descriptor DDB entry for LCHS cylinders. */
110#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
111/** VMDK descriptor DDB entry for LCHS heads. */
112#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
113/** VMDK descriptor DDB entry for LCHS sectors. */
114#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
115/** VMDK descriptor DDB entry for image UUID. */
116#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
117/** VMDK descriptor DDB entry for image modification UUID. */
118#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
119/** VMDK descriptor DDB entry for parent image UUID. */
120#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
121/** VMDK descriptor DDB entry for parent image modification UUID. */
122#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
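/* Illustrative example of how these keys appear in the disk database section
 * of a descriptor (the values here are made up):
 *     ddb.geometry.cylinders = "16383"
 *     ddb.geometry.heads = "16"
 *     ddb.geometry.sectors = "63"
 *     ddb.uuid.image = "01234567-89ab-cdef-0123-456789abcdef"
 */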
123/** No compression for streamOptimized files. */
124#define VMDK_COMPRESSION_NONE 0
125/** Deflate compression for streamOptimized files. */
126#define VMDK_COMPRESSION_DEFLATE 1
127/** Marker that the actual GD value is stored in the footer. */
128#define VMDK_GD_AT_END 0xffffffffffffffffULL
129/** Marker for end-of-stream in streamOptimized images. */
130#define VMDK_MARKER_EOS 0
131/** Marker for grain table block in streamOptimized images. */
132#define VMDK_MARKER_GT 1
133/** Marker for grain directory block in streamOptimized images. */
134#define VMDK_MARKER_GD 2
135/** Marker for footer in streamOptimized images. */
136#define VMDK_MARKER_FOOTER 3
137/** Marker for unknown purpose in streamOptimized images.
138 * Shows up in very recent images created by vSphere, but only sporadically.
139 * They "forgot" to document that one in the VMDK specification. */
140#define VMDK_MARKER_UNSPECIFIED 4
141/** Dummy marker for "don't check the marker value". */
142#define VMDK_MARKER_IGNORE 0xffffffffU
143/**
144 * Magic number for hosted images created by VMware Workstation 4, VMware
145 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
146 */
147#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
148/** VMDK sector size in bytes. */
149#define VMDK_SECTOR_SIZE 512
150/** Max string buffer size for uint64_t with null term */
151#define UINT64_MAX_BUFF_SIZE 21
152/** Grain directory entry size in bytes */
153#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
154/** Grain table size in bytes */
155#define VMDK_GRAIN_TABLE_SIZE 2048
156/**
157 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
158 * this header is also used for monolithic flat images.
159 */
160#pragma pack(1)
161typedef struct SparseExtentHeader
162{
163 uint32_t magicNumber;
164 uint32_t version;
165 uint32_t flags;
166 uint64_t capacity;
167 uint64_t grainSize;
168 uint64_t descriptorOffset;
169 uint64_t descriptorSize;
170 uint32_t numGTEsPerGT;
171 uint64_t rgdOffset;
172 uint64_t gdOffset;
173 uint64_t overHead;
174 bool uncleanShutdown;
175 char singleEndLineChar;
176 char nonEndLineChar;
177 char doubleEndLineChar1;
178 char doubleEndLineChar2;
179 uint16_t compressAlgorithm;
180 uint8_t pad[433];
181} SparseExtentHeader;
182#pragma pack()
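/* Minimal validation sketch (illustrative only; the checks done when an extent
 * is actually opened are more thorough): read the header from the start of the
 * extent file and verify the magic number, e.g.
 *     SparseExtentHeader Hdr;
 *     // ... fill Hdr with the first sizeof(Hdr) bytes of the extent file ...
 *     if (RT_LE2H_U32(Hdr.magicNumber) != VMDK_SPARSE_MAGICNUMBER)
 *         return VERR_VD_VMDK_INVALID_FORMAT;
 */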
183/** The maximum allowed descriptor size in the extent header in sectors. */
184#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
185/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
186 * divisible by the default grain size (64K) */
187#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
188/** VMDK streamOptimized file format marker. The type field may or may not
189 * be actually valid, but there's always data to read there. */
190#pragma pack(1)
191typedef struct VMDKMARKER
192{
193 uint64_t uSector;
194 uint32_t cbSize;
195 uint32_t uType;
196} VMDKMARKER, *PVMDKMARKER;
197#pragma pack()
198/** Convert sector number/size to byte offset/size. */
199#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
200/** Convert byte offset/size to sector number/size. */
201#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
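/* Quick arithmetic example: with 512 byte sectors a default 64K grain is 128
 * sectors, so VMDK_SECTOR2BYTE(128) == 65536 and VMDK_BYTE2SECTOR(_1M) == 2048. */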
202/**
203 * VMDK extent type.
204 */
205typedef enum VMDKETYPE
206{
207 /** Hosted sparse extent. */
208 VMDKETYPE_HOSTED_SPARSE = 1,
209 /** Flat extent. */
210 VMDKETYPE_FLAT,
211 /** Zero extent. */
212 VMDKETYPE_ZERO,
213 /** VMFS extent, used by ESX. */
214 VMDKETYPE_VMFS
215} VMDKETYPE, *PVMDKETYPE;
216/**
217 * VMDK access type for an extent.
218 */
219typedef enum VMDKACCESS
220{
221 /** No access allowed. */
222 VMDKACCESS_NOACCESS = 0,
223 /** Read-only access. */
224 VMDKACCESS_READONLY,
225 /** Read-write access. */
226 VMDKACCESS_READWRITE
227} VMDKACCESS, *PVMDKACCESS;
228/** Forward declaration for PVMDKIMAGE. */
229typedef struct VMDKIMAGE *PVMDKIMAGE;
230/**
231 * Extent file entry. Used for opening a particular file only once.
232 */
233typedef struct VMDKFILE
234{
235 /** Pointer to file path. Local copy. */
236 const char *pszFilename;
237 /** Pointer to base name. Local copy. */
238 const char *pszBasename;
239 /** File open flags for consistency checking. */
240 unsigned fOpen;
241 /** Handle for sync/async file abstraction.*/
242 PVDIOSTORAGE pStorage;
243 /** Reference counter. */
244 unsigned uReferences;
245 /** Flag whether the file should be deleted on last close. */
246 bool fDelete;
247 /** Pointer to the image we belong to (for debugging purposes). */
248 PVMDKIMAGE pImage;
249 /** Pointer to next file descriptor. */
250 struct VMDKFILE *pNext;
251 /** Pointer to the previous file descriptor. */
252 struct VMDKFILE *pPrev;
253} VMDKFILE, *PVMDKFILE;
254/**
255 * VMDK extent data structure.
256 */
257typedef struct VMDKEXTENT
258{
259 /** File handle. */
260 PVMDKFILE pFile;
261 /** Base name of the image extent. */
262 const char *pszBasename;
263 /** Full name of the image extent. */
264 const char *pszFullname;
265 /** Number of sectors in this extent. */
266 uint64_t cSectors;
267 /** Number of sectors per block (grain in VMDK speak). */
268 uint64_t cSectorsPerGrain;
269 /** Starting sector number of descriptor. */
270 uint64_t uDescriptorSector;
271 /** Size of descriptor in sectors. */
272 uint64_t cDescriptorSectors;
273 /** Starting sector number of grain directory. */
274 uint64_t uSectorGD;
275 /** Starting sector number of redundant grain directory. */
276 uint64_t uSectorRGD;
277 /** Total number of metadata sectors. */
278 uint64_t cOverheadSectors;
279 /** Nominal size (i.e. as described by the descriptor) of this extent. */
280 uint64_t cNominalSectors;
281 /** Sector offset (i.e. as described by the descriptor) of this extent. */
282 uint64_t uSectorOffset;
283 /** Number of entries in a grain table. */
284 uint32_t cGTEntries;
285 /** Number of sectors reachable via a grain directory entry. */
286 uint32_t cSectorsPerGDE;
287 /** Number of entries in the grain directory. */
288 uint32_t cGDEntries;
289 /** Pointer to the next free sector. Legacy information. Do not use. */
290 uint32_t uFreeSector;
291 /** Number of this extent in the list of images. */
292 uint32_t uExtent;
293 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
294 char *pDescData;
295 /** Pointer to the grain directory. */
296 uint32_t *pGD;
297 /** Pointer to the redundant grain directory. */
298 uint32_t *pRGD;
299 /** VMDK version of this extent. 1=1.0/1.1 */
300 uint32_t uVersion;
301 /** Type of this extent. */
302 VMDKETYPE enmType;
303 /** Access to this extent. */
304 VMDKACCESS enmAccess;
305 /** Flag whether this extent is marked as unclean. */
306 bool fUncleanShutdown;
307 /** Flag whether the metadata in the extent header needs to be updated. */
308 bool fMetaDirty;
309 /** Flag whether there is a footer in this extent. */
310 bool fFooter;
311 /** Compression type for this extent. */
312 uint16_t uCompression;
313 /** Append position for writing new grain. Only for sparse extents. */
314 uint64_t uAppendPosition;
315 /** Last grain which was accessed. Only for streamOptimized extents. */
316 uint32_t uLastGrainAccess;
317 /** Starting sector corresponding to the grain buffer. */
318 uint32_t uGrainSectorAbs;
319 /** Grain number corresponding to the grain buffer. */
320 uint32_t uGrain;
321 /** Actual size of the compressed data, only valid for reading. */
322 uint32_t cbGrainStreamRead;
323 /** Size of compressed grain buffer for streamOptimized extents. */
324 size_t cbCompGrain;
325 /** Compressed grain buffer for streamOptimized extents, with marker. */
326 void *pvCompGrain;
327 /** Decompressed grain buffer for streamOptimized extents. */
328 void *pvGrain;
329 /** Reference to the image in which this extent is used. Do not use this
330 * on a regular basis to avoid passing pImage references to functions
331 * explicitly. */
332 struct VMDKIMAGE *pImage;
333} VMDKEXTENT, *PVMDKEXTENT;
334/**
335 * Grain table cache size. Allocated per image.
336 */
337#define VMDK_GT_CACHE_SIZE 256
338/**
339 * Grain table block size. Smaller than an actual grain table block to allow
340 * more grain table blocks to be cached without having to allocate excessive
341 * amounts of memory for the cache.
342 */
343#define VMDK_GT_CACHELINE_SIZE 128
344/**
345 * Maximum number of lines in a descriptor file. Not worth the effort of
346 * making it variable. Descriptor files are generally very short (~20 lines),
347 * with the exception of sparse files split into 2G chunks, which at the
348 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
349 */
350#define VMDK_DESCRIPTOR_LINES_MAX 1100U
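/* Rough arithmetic behind the limit (illustrative): an almost 2T image split
 * into 2G chunks needs on the order of 1024 extent description lines plus the
 * header, disk descriptor and disk database lines, which stays comfortably
 * below the 1100 line cap. */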
351/**
352 * Parsed descriptor information. Allows easy access and update of the
353 * descriptor (whether separate file or not). Free form text files suck.
354 */
355typedef struct VMDKDESCRIPTOR
356{
357 /** Line number of first entry of the disk descriptor. */
358 unsigned uFirstDesc;
359 /** Line number of first entry in the extent description. */
360 unsigned uFirstExtent;
361 /** Line number of first disk database entry. */
362 unsigned uFirstDDB;
363 /** Total number of lines. */
364 unsigned cLines;
365 /** Total amount of memory available for the descriptor. */
366 size_t cbDescAlloc;
367 /** Set if descriptor has been changed and not yet written to disk. */
368 bool fDirty;
369 /** Array of pointers to the data in the descriptor. */
370 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
371 /** Array of line indices pointing to the next non-comment line. */
372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
373} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
374/**
375 * Cache entry for translating extent/sector to a sector number in that
376 * extent.
377 */
378typedef struct VMDKGTCACHEENTRY
379{
380 /** Extent number for which this entry is valid. */
381 uint32_t uExtent;
382 /** GT data block number. */
383 uint64_t uGTBlock;
384 /** Data part of the cache entry. */
385 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
386} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
387/**
388 * Cache data structure for blocks of grain table entries. For now this is a
389 * fixed size direct mapping cache, but this should be adapted to the size of
390 * the sparse image and maybe converted to a set-associative cache. The
391 * implementation below implements a write-through cache with write allocate.
392 */
393typedef struct VMDKGTCACHE
394{
395 /** Cache entries. */
396 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
397 /** Number of cache entries (currently unused). */
398 unsigned cEntries;
399} VMDKGTCACHE, *PVMDKGTCACHE;
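/* Lookup sketch for this direct-mapped cache (assuming a plain modulo index;
 * the hash actually used further down in this file may differ):
 *     unsigned idx = (unsigned)(uGTBlock % VMDK_GT_CACHE_SIZE);
 *     PVMDKGTCACHEENTRY pEntry = &pCache->aGTCache[idx];
 *     bool fHit = pEntry->uExtent == uExtent && pEntry->uGTBlock == uGTBlock;
 */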
400/**
401 * Complete VMDK image data structure. Mainly a collection of extents and a few
402 * extra global data fields.
403 */
404typedef struct VMDKIMAGE
405{
406 /** Image name. */
407 const char *pszFilename;
408 /** Descriptor file if applicable. */
409 PVMDKFILE pFile;
410 /** Pointer to the per-disk VD interface list. */
411 PVDINTERFACE pVDIfsDisk;
412 /** Pointer to the per-image VD interface list. */
413 PVDINTERFACE pVDIfsImage;
414 /** Error interface. */
415 PVDINTERFACEERROR pIfError;
416 /** I/O interface. */
417 PVDINTERFACEIOINT pIfIo;
418 /** Pointer to the image extents. */
419 PVMDKEXTENT pExtents;
420 /** Number of image extents. */
421 unsigned cExtents;
422 /** Pointer to the files list, for opening a file referenced multiple
423 * times only once (happens mainly with raw partition access). */
424 PVMDKFILE pFiles;
425 /**
426 * Pointer to an array of segment entries for async I/O.
427 * This is an optimization because the number of tasks to submit is not
428 * known in advance, and allocating/freeing an array in the read/write
429 * functions every time would be too expensive.
430 */
431 PPDMDATASEG paSegments;
432 /** Entries available in the segments array. */
433 unsigned cSegments;
434 /** Open flags passed by VBoxHD layer. */
435 unsigned uOpenFlags;
436 /** Image flags defined during creation or determined during open. */
437 unsigned uImageFlags;
438 /** Total size of the image. */
439 uint64_t cbSize;
440 /** Physical geometry of this image. */
441 VDGEOMETRY PCHSGeometry;
442 /** Logical geometry of this image. */
443 VDGEOMETRY LCHSGeometry;
444 /** Image UUID. */
445 RTUUID ImageUuid;
446 /** Image modification UUID. */
447 RTUUID ModificationUuid;
448 /** Parent image UUID. */
449 RTUUID ParentUuid;
450 /** Parent image modification UUID. */
451 RTUUID ParentModificationUuid;
452 /** Pointer to grain table cache, if this image contains sparse extents. */
453 PVMDKGTCACHE pGTCache;
454 /** Pointer to the descriptor (NULL if no separate descriptor file). */
455 char *pDescData;
456 /** Allocation size of the descriptor file. */
457 size_t cbDescAlloc;
458 /** Parsed descriptor file content. */
459 VMDKDESCRIPTOR Descriptor;
460 /** The static region list. */
461 VDREGIONLIST RegionList;
462} VMDKIMAGE;
463/** State for the input/output callout of the inflate reader/deflate writer. */
464typedef struct VMDKCOMPRESSIO
465{
466 /* Image this operation relates to. */
467 PVMDKIMAGE pImage;
468 /* Current read position. */
469 ssize_t iOffset;
470 /* Size of the compressed grain buffer (available data). */
471 size_t cbCompGrain;
472 /* Pointer to the compressed grain buffer. */
473 void *pvCompGrain;
474} VMDKCOMPRESSIO;
475/** Tracks async grain allocation. */
476typedef struct VMDKGRAINALLOCASYNC
477{
478 /** Flag whether the allocation failed. */
479 bool fIoErr;
480 /** Current number of transfers pending.
481 * If it reaches 0 and there is an error, the old state is restored. */
482 unsigned cIoXfersPending;
483 /** Sector number */
484 uint64_t uSector;
485 /** Flag whether the grain table needs to be updated. */
486 bool fGTUpdateNeeded;
487 /** Extent in which the allocation happens. */
488 PVMDKEXTENT pExtent;
489 /** Position of the new grain, required for the grain table update. */
490 uint64_t uGrainOffset;
491 /** Grain table sector. */
492 uint64_t uGTSector;
493 /** Backup grain table sector. */
494 uint64_t uRGTSector;
495} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
496/**
497 * State information for vmdkRename() and helpers.
498 */
499typedef struct VMDKRENAMESTATE
500{
501 /** Array of old filenames. */
502 char **apszOldName;
503 /** Array of new filenames. */
504 char **apszNewName;
505 /** Array of new lines in the extent descriptor. */
506 char **apszNewLines;
507 /** Name of the old descriptor file if not a sparse image. */
508 char *pszOldDescName;
509 /** Flag whether we called vmdkFreeImage(). */
510 bool fImageFreed;
511 /** Flag whether the descriptor is embedded in the image (sparse) or
512 * in a separate file. */
513 bool fEmbeddedDesc;
514 /** Number of extents in the image. */
515 unsigned cExtents;
516 /** New base filename. */
517 char *pszNewBaseName;
518 /** The old base filename. */
519 char *pszOldBaseName;
520 /** New full filename. */
521 char *pszNewFullName;
522 /** Old full filename. */
523 char *pszOldFullName;
524 /** The old image name. */
525 const char *pszOldImageName;
526 /** Copy of the original VMDK descriptor. */
527 VMDKDESCRIPTOR DescriptorCopy;
528 /** Copy of the extent state for sparse images. */
529 VMDKEXTENT ExtentCopy;
530} VMDKRENAMESTATE;
531/** Pointer to a VMDK rename state. */
532typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
533
534
535/*********************************************************************************************************************************
536* Static Variables *
537*********************************************************************************************************************************/
538/** NULL-terminated array of supported file extensions. */
539static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
540{
541 {"vmdk", VDTYPE_HDD},
542 {NULL, VDTYPE_INVALID}
543};
544/** NULL-terminated array of configuration options. */
545static const VDCONFIGINFO s_aVmdkConfigInfo[] =
546{
547 /* Options for VMDK raw disks */
548 { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
549 { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
550 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
551 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
552 /* End of options list */
553 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
554};
555
556
557/*********************************************************************************************************************************
558* Internal Functions *
559*********************************************************************************************************************************/
560static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
561static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
562 bool fDelete);
563static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
564static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
565static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
566static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
567static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
568 void *pvUser, int rcReq);
569/**
570 * Internal: open a file (using a file descriptor cache to ensure each file
571 * is only opened once - anything else can cause locking problems).
572 */
573static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
574 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
575{
576 int rc = VINF_SUCCESS;
577 PVMDKFILE pVmdkFile;
578 for (pVmdkFile = pImage->pFiles;
579 pVmdkFile != NULL;
580 pVmdkFile = pVmdkFile->pNext)
581 {
582 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
583 {
584 Assert(fOpen == pVmdkFile->fOpen);
585 pVmdkFile->uReferences++;
586 *ppVmdkFile = pVmdkFile;
587 return rc;
588 }
589 }
590 /* If we get here, there's no matching entry in the cache. */
591 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
592 if (!pVmdkFile)
593 {
594 *ppVmdkFile = NULL;
595 return VERR_NO_MEMORY;
596 }
597 pVmdkFile->pszFilename = RTStrDup(pszFilename);
598 if (!pVmdkFile->pszFilename)
599 {
600 RTMemFree(pVmdkFile);
601 *ppVmdkFile = NULL;
602 return VERR_NO_MEMORY;
603 }
604 if (pszBasename)
605 {
606 pVmdkFile->pszBasename = RTStrDup(pszBasename);
607 if (!pVmdkFile->pszBasename)
608 {
609 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
610 RTMemFree(pVmdkFile);
611 *ppVmdkFile = NULL;
612 return VERR_NO_MEMORY;
613 }
614 }
615 pVmdkFile->fOpen = fOpen;
616 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
617 &pVmdkFile->pStorage);
618 if (RT_SUCCESS(rc))
619 {
620 pVmdkFile->uReferences = 1;
621 pVmdkFile->pImage = pImage;
622 pVmdkFile->pNext = pImage->pFiles;
623 if (pImage->pFiles)
624 pImage->pFiles->pPrev = pVmdkFile;
625 pImage->pFiles = pVmdkFile;
626 *ppVmdkFile = pVmdkFile;
627 }
628 else
629 {
630 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
631 RTMemFree(pVmdkFile);
632 *ppVmdkFile = NULL;
633 }
634 return rc;
635}
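/* Usage sketch (simplified, error handling elided; fOpenFlags is a placeholder
 * for whatever open flags the caller computed): opening the same path twice
 * only bumps the reference count of the cached entry, and vmdkFileClose()
 * releases it again.
 *     PVMDKFILE pFile = NULL;
 *     rc = vmdkFileOpen(pImage, &pFile, NULL, pszFilename, fOpenFlags);
 *     // ... use pFile->pStorage for I/O ...
 *     rc = vmdkFileClose(pImage, &pFile, false);
 */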
636/**
637 * Internal: close a file, updating the file descriptor cache.
638 */
639static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
640{
641 int rc = VINF_SUCCESS;
642 PVMDKFILE pVmdkFile = *ppVmdkFile;
643 AssertPtr(pVmdkFile);
644 pVmdkFile->fDelete |= fDelete;
645 Assert(pVmdkFile->uReferences);
646 pVmdkFile->uReferences--;
647 if (pVmdkFile->uReferences == 0)
648 {
649 PVMDKFILE pPrev;
650 PVMDKFILE pNext;
651 /* Unchain the element from the list. */
652 pPrev = pVmdkFile->pPrev;
653 pNext = pVmdkFile->pNext;
654 if (pNext)
655 pNext->pPrev = pPrev;
656 if (pPrev)
657 pPrev->pNext = pNext;
658 else
659 pImage->pFiles = pNext;
660 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
661 bool fFileDel = pVmdkFile->fDelete;
662 if ( pVmdkFile->pszBasename
663 && fFileDel)
664 {
665 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
666 if ( RTPathHasPath(pVmdkFile->pszBasename)
667 || !pszSuffix
668 || ( strcmp(pszSuffix, ".vmdk")
669 && strcmp(pszSuffix, ".bin")
670 && strcmp(pszSuffix, ".img")))
671 fFileDel = false;
672 }
673 if (fFileDel)
674 {
675 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
676 if (RT_SUCCESS(rc))
677 rc = rc2;
678 }
679 else if (pVmdkFile->fDelete)
680 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
681 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
682 if (pVmdkFile->pszBasename)
683 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
684 RTMemFree(pVmdkFile);
685 }
686 *ppVmdkFile = NULL;
687 return rc;
688}
689/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
690#ifndef VMDK_USE_BLOCK_DECOMP_API
691static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
692{
693 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
694 size_t cbInjected = 0;
695 Assert(cbBuf);
696 if (pInflateState->iOffset < 0)
697 {
698 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
699 pvBuf = (uint8_t *)pvBuf + 1;
700 cbBuf--;
701 cbInjected = 1;
702 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
703 }
704 if (!cbBuf)
705 {
706 if (pcbBuf)
707 *pcbBuf = cbInjected;
708 return VINF_SUCCESS;
709 }
710 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
711 memcpy(pvBuf,
712 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
713 cbBuf);
714 pInflateState->iOffset += cbBuf;
715 Assert(pcbBuf);
716 *pcbBuf = cbBuf + cbInjected;
717 return VINF_SUCCESS;
718}
719#endif
720/**
721 * Internal: read from a file and inflate the compressed data,
722 * distinguishing between async and normal operation
723 */
724DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
725 uint64_t uOffset, void *pvBuf,
726 size_t cbToRead, const void *pcvMarker,
727 uint64_t *puLBA, uint32_t *pcbMarkerData)
728{
729 int rc;
730#ifndef VMDK_USE_BLOCK_DECOMP_API
731 PRTZIPDECOMP pZip = NULL;
732#endif
733 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
734 size_t cbCompSize, cbActuallyRead;
735 if (!pcvMarker)
736 {
737 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
738 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
739 if (RT_FAILURE(rc))
740 return rc;
741 }
742 else
743 {
744 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
745 /* pcvMarker endianness has already been partially transformed, fix it */
746 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
747 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
748 }
749 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
750 if (cbCompSize == 0)
751 {
752 AssertMsgFailed(("VMDK: corrupted marker\n"));
753 return VERR_VD_VMDK_INVALID_FORMAT;
754 }
755 /* Sanity check - the expansion ratio should be much less than 2. */
756 Assert(cbCompSize < 2 * cbToRead);
757 if (cbCompSize >= 2 * cbToRead)
758 return VERR_VD_VMDK_INVALID_FORMAT;
759 /* Compressed grain marker. Data follows immediately. */
760 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
761 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
762 (uint8_t *)pExtent->pvCompGrain
763 + RT_UOFFSETOF(VMDKMARKER, uType),
764 RT_ALIGN_Z( cbCompSize
765 + RT_UOFFSETOF(VMDKMARKER, uType),
766 512)
767 - RT_UOFFSETOF(VMDKMARKER, uType));
768 if (puLBA)
769 *puLBA = RT_LE2H_U64(pMarker->uSector);
770 if (pcbMarkerData)
771 *pcbMarkerData = RT_ALIGN( cbCompSize
772 + RT_UOFFSETOF(VMDKMARKER, uType),
773 512);
774#ifdef VMDK_USE_BLOCK_DECOMP_API
775 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
776 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
777 pvBuf, cbToRead, &cbActuallyRead);
778#else
779 VMDKCOMPRESSIO InflateState;
780 InflateState.pImage = pImage;
781 InflateState.iOffset = -1;
782 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
783 InflateState.pvCompGrain = pExtent->pvCompGrain;
784 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
785 if (RT_FAILURE(rc))
786 return rc;
787 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
788 RTZipDecompDestroy(pZip);
789#endif /* !VMDK_USE_BLOCK_DECOMP_API */
790 if (RT_FAILURE(rc))
791 {
792 if (rc == VERR_ZIP_CORRUPTED)
793 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
794 return rc;
795 }
796 if (cbActuallyRead != cbToRead)
797 rc = VERR_VD_VMDK_INVALID_FORMAT;
798 return rc;
799}
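/* On-disk layout handled by the inflate/deflate helpers here: the 12 byte
 * marker prefix (uSector and cbSize, i.e. everything up to the uType field) is
 * followed immediately by cbSize bytes of deflate data, and the whole record is
 * zero padded to the next 512 byte boundary. For example, a grain compressing
 * to 1000 bytes yields a 1012 byte record padded to 1024 bytes, which is what
 * *pcbMarkerData reports. */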
800static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
801{
802 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
803 Assert(cbBuf);
804 if (pDeflateState->iOffset < 0)
805 {
806 pvBuf = (const uint8_t *)pvBuf + 1;
807 cbBuf--;
808 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
809 }
810 if (!cbBuf)
811 return VINF_SUCCESS;
812 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
813 return VERR_BUFFER_OVERFLOW;
814 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
815 pvBuf, cbBuf);
816 pDeflateState->iOffset += cbBuf;
817 return VINF_SUCCESS;
818}
819/**
820 * Internal: deflate the uncompressed data and write to a file,
821 * distinguishing between async and normal operation
822 */
823DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
824 uint64_t uOffset, const void *pvBuf,
825 size_t cbToWrite, uint64_t uLBA,
826 uint32_t *pcbMarkerData)
827{
828 int rc;
829 PRTZIPCOMP pZip = NULL;
830 VMDKCOMPRESSIO DeflateState;
831 DeflateState.pImage = pImage;
832 DeflateState.iOffset = -1;
833 DeflateState.cbCompGrain = pExtent->cbCompGrain;
834 DeflateState.pvCompGrain = pExtent->pvCompGrain;
835 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
836 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
837 if (RT_FAILURE(rc))
838 return rc;
839 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
840 if (RT_SUCCESS(rc))
841 rc = RTZipCompFinish(pZip);
842 RTZipCompDestroy(pZip);
843 if (RT_SUCCESS(rc))
844 {
845 Assert( DeflateState.iOffset > 0
846 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
847 /* pad with zeroes to get to a full sector size */
848 uint32_t uSize = DeflateState.iOffset;
849 if (uSize % 512)
850 {
851 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
852 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
853 uSizeAlign - uSize);
854 uSize = uSizeAlign;
855 }
856 if (pcbMarkerData)
857 *pcbMarkerData = uSize;
858 /* Compressed grain marker. Data follows immediately. */
859 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
860 pMarker->uSector = RT_H2LE_U64(uLBA);
861 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
862 - RT_UOFFSETOF(VMDKMARKER, uType));
863 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
864 uOffset, pMarker, uSize);
865 if (RT_FAILURE(rc))
866 return rc;
867 }
868 return rc;
869}
870/**
871 * Internal: check if all files are closed, prevent leaking resources.
872 */
873static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
874{
875 int rc = VINF_SUCCESS, rc2;
876 PVMDKFILE pVmdkFile;
877 Assert(pImage->pFiles == NULL);
878 for (pVmdkFile = pImage->pFiles;
879 pVmdkFile != NULL;
880 pVmdkFile = pVmdkFile->pNext)
881 {
882 LogRel(("VMDK: leaking reference to file \"%s\"\n",
883 pVmdkFile->pszFilename));
884 pImage->pFiles = pVmdkFile->pNext;
885 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
886 if (RT_SUCCESS(rc))
887 rc = rc2;
888 }
889 return rc;
890}
891/**
892 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
893 * critical non-ASCII characters.
894 */
895static char *vmdkEncodeString(const char *psz)
896{
897 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
898 char *pszDst = szEnc;
899 AssertPtr(psz);
900 for (; *psz; psz = RTStrNextCp(psz))
901 {
902 char *pszDstPrev = pszDst;
903 RTUNICP Cp = RTStrGetCp(psz);
904 if (Cp == '\\')
905 {
906 pszDst = RTStrPutCp(pszDst, Cp);
907 pszDst = RTStrPutCp(pszDst, Cp);
908 }
909 else if (Cp == '\n')
910 {
911 pszDst = RTStrPutCp(pszDst, '\\');
912 pszDst = RTStrPutCp(pszDst, 'n');
913 }
914 else if (Cp == '\r')
915 {
916 pszDst = RTStrPutCp(pszDst, '\\');
917 pszDst = RTStrPutCp(pszDst, 'r');
918 }
919 else
920 pszDst = RTStrPutCp(pszDst, Cp);
921 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
922 {
923 pszDst = pszDstPrev;
924 break;
925 }
926 }
927 *pszDst = '\0';
928 return RTStrDup(szEnc);
929}
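/* Example of the escaping performed above (illustrative): a newline in the
 * comment is stored as the two characters backslash + 'n', a carriage return
 * as backslash + 'r', and a literal backslash is doubled; vmdkDecodeString()
 * below reverses the transformation. */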
930/**
931 * Internal: decode a string and store it into the specified buffer.
932 */
933static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
934{
935 int rc = VINF_SUCCESS;
936 char szBuf[4];
937 if (!cb)
938 return VERR_BUFFER_OVERFLOW;
939 AssertPtr(psz);
940 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
941 {
942 char *pszDst = szBuf;
943 RTUNICP Cp = RTStrGetCp(pszEncoded);
944 if (Cp == '\\')
945 {
946 pszEncoded = RTStrNextCp(pszEncoded);
947 RTUNICP CpQ = RTStrGetCp(pszEncoded);
948 if (CpQ == 'n')
949 pszDst = RTStrPutCp(pszDst, '\n');
950 else if (CpQ == 'r')
951 pszDst = RTStrPutCp(pszDst, '\r');
952 else if (CpQ == '\0')
953 {
954 rc = VERR_VD_VMDK_INVALID_HEADER;
955 break;
956 }
957 else
958 pszDst = RTStrPutCp(pszDst, CpQ);
959 }
960 else
961 pszDst = RTStrPutCp(pszDst, Cp);
962 /* Need to leave space for terminating NUL. */
963 if ((size_t)(pszDst - szBuf) + 1 >= cb)
964 {
965 rc = VERR_BUFFER_OVERFLOW;
966 break;
967 }
968 memcpy(psz, szBuf, pszDst - szBuf);
969 psz += pszDst - szBuf;
970 }
971 *psz = '\0';
972 return rc;
973}
974/**
975 * Internal: free all buffers associated with grain directories.
976 */
977static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
978{
979 if (pExtent->pGD)
980 {
981 RTMemFree(pExtent->pGD);
982 pExtent->pGD = NULL;
983 }
984 if (pExtent->pRGD)
985 {
986 RTMemFree(pExtent->pRGD);
987 pExtent->pRGD = NULL;
988 }
989}
990/**
991 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
992 * images.
993 */
994static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
995{
996 int rc = VINF_SUCCESS;
997 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
998 {
999 /* streamOptimized extents need a compressed grain buffer, which must
1000 * be big enough to hold uncompressible data (which needs ~8 bytes
1001 * more than the uncompressed data), the marker and padding. */
1002 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1003 + 8 + sizeof(VMDKMARKER), 512);
1004 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1005 if (RT_LIKELY(pExtent->pvCompGrain))
1006 {
1007 /* streamOptimized extents need a decompressed grain buffer. */
1008 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1009 if (!pExtent->pvGrain)
1010 rc = VERR_NO_MEMORY;
1011 }
1012 else
1013 rc = VERR_NO_MEMORY;
1014 }
1015 if (RT_FAILURE(rc))
1016 vmdkFreeStreamBuffers(pExtent);
1017 return rc;
1018}
1019/**
1020 * Internal: allocate all buffers associated with grain directories.
1021 */
1022static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1023{
1024 RT_NOREF1(pImage);
1025 int rc = VINF_SUCCESS;
1026 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1027 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1028 if (RT_LIKELY(pExtent->pGD))
1029 {
1030 if (pExtent->uSectorRGD)
1031 {
1032 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1033 if (RT_UNLIKELY(!pExtent->pRGD))
1034 rc = VERR_NO_MEMORY;
1035 }
1036 }
1037 else
1038 rc = VERR_NO_MEMORY;
1039 if (RT_FAILURE(rc))
1040 vmdkFreeGrainDirectory(pExtent);
1041 return rc;
1042}
1043/**
1044 * Converts the grain directory from little endian to host endianness.
1045 *
1046 * @returns nothing.
1047 * @param pGD The grain directory.
1048 * @param cGDEntries Number of entries in the grain directory to convert.
1049 */
1050DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1051{
1052 uint32_t *pGDTmp = pGD;
1053 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1054 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1055}
1056/**
1057 * Read the grain directory and allocated grain tables, verifying them against
1058 * their backup copies if available.
1059 *
1060 * @returns VBox status code.
1061 * @param pImage Image instance data.
1062 * @param pExtent The VMDK extent.
1063 */
1064static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1065{
1066 int rc = VINF_SUCCESS;
1067 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1068 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1069 && pExtent->uSectorGD != VMDK_GD_AT_END
1070 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1071 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1072 if (RT_SUCCESS(rc))
1073 {
1074 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1075 * but in reality they are not compressed. */
1076 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1077 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1078 pExtent->pGD, cbGD);
1079 if (RT_SUCCESS(rc))
1080 {
1081 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1082 if ( pExtent->uSectorRGD
1083 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1084 {
1085 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1086 * but in reality they are not compressed. */
1087 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1088 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1089 pExtent->pRGD, cbGD);
1090 if (RT_SUCCESS(rc))
1091 {
1092 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1093 /* Check grain table and redundant grain table for consistency. */
1094 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1095 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1096 size_t cbGTBuffersMax = _1M;
1097 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1098 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1099 if ( !pTmpGT1
1100 || !pTmpGT2)
1101 rc = VERR_NO_MEMORY;
1102 size_t i = 0;
1103 uint32_t *pGDTmp = pExtent->pGD;
1104 uint32_t *pRGDTmp = pExtent->pRGD;
1105 /* Loop through all entries. */
1106 while (i < pExtent->cGDEntries)
1107 {
1108 uint32_t uGTStart = *pGDTmp;
1109 uint32_t uRGTStart = *pRGDTmp;
1110 size_t cbGTRead = cbGT;
1111 /* If no grain table is allocated skip the entry. */
1112 if (*pGDTmp == 0 && *pRGDTmp == 0)
1113 {
1114 i++;
1115 continue;
1116 }
1117 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1118 {
1119 /* Just one grain directory entry refers to a not yet allocated
1120 * grain table or both grain directory copies refer to the same
1121 * grain table. Not allowed. */
1122 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1123 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1124 break;
1125 }
1126 i++;
1127 pGDTmp++;
1128 pRGDTmp++;
1129 /*
1130 * Read a few tables at once if adjacent to decrease the number
1131 * of I/O requests. Read at maximum 1MB at once.
1132 */
1133 while ( i < pExtent->cGDEntries
1134 && cbGTRead < cbGTBuffersMax)
1135 {
1136 /* If no grain table is allocated skip the entry. */
1137 if (*pGDTmp == 0 && *pRGDTmp == 0)
1138 {
1139 i++;
1140 continue;
1141 }
1142 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1143 {
1144 /* Just one grain directory entry refers to a not yet allocated
1145 * grain table or both grain directory copies refer to the same
1146 * grain table. Not allowed. */
1147 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1148 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1149 break;
1150 }
1151 /* Check that the start offsets are adjacent.*/
1152 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1153 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1154 break;
1155 i++;
1156 pGDTmp++;
1157 pRGDTmp++;
1158 cbGTRead += cbGT;
1159 }
1160 /* Increase buffers if required. */
1161 if ( RT_SUCCESS(rc)
1162 && cbGTBuffers < cbGTRead)
1163 {
1164 uint32_t *pTmp;
1165 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1166 if (pTmp)
1167 {
1168 pTmpGT1 = pTmp;
1169 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1170 if (pTmp)
1171 pTmpGT2 = pTmp;
1172 else
1173 rc = VERR_NO_MEMORY;
1174 }
1175 else
1176 rc = VERR_NO_MEMORY;
1177 if (rc == VERR_NO_MEMORY)
1178 {
1179 /* Reset to the old values. */
1180 rc = VINF_SUCCESS;
1181 i -= cbGTRead / cbGT;
1182 cbGTRead = cbGT;
1183 /* Don't try to increase the buffer again in the next run. */
1184 cbGTBuffersMax = cbGTBuffers;
1185 }
1186 }
1187 if (RT_SUCCESS(rc))
1188 {
1189 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1190 * but in reality they are not compressed. */
1191 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1192 VMDK_SECTOR2BYTE(uGTStart),
1193 pTmpGT1, cbGTRead);
1194 if (RT_FAILURE(rc))
1195 {
1196 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1197 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1198 break;
1199 }
1200 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1201 * but in reality they are not compressed. */
1202 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1203 VMDK_SECTOR2BYTE(uRGTStart),
1204 pTmpGT2, cbGTRead);
1205 if (RT_FAILURE(rc))
1206 {
1207 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1208 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1209 break;
1210 }
1211 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1212 {
1213 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1214 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1215 break;
1216 }
1217 }
1218 } /* while (i < pExtent->cGDEntries) */
1219 /** @todo figure out what to do for unclean VMDKs. */
1220 if (pTmpGT1)
1221 RTMemFree(pTmpGT1);
1222 if (pTmpGT2)
1223 RTMemFree(pTmpGT2);
1224 }
1225 else
1226 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1227 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1228 }
1229 }
1230 else
1231 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1232 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1233 }
1234 if (RT_FAILURE(rc))
1235 vmdkFreeGrainDirectory(pExtent);
1236 return rc;
1237}
1238/**
1239 * Creates a new grain directory for the given extent at the given start sector.
1240 *
1241 * @returns VBox status code.
1242 * @param pImage Image instance data.
1243 * @param pExtent The VMDK extent.
1244 * @param uStartSector Where the grain directory should be stored in the image.
1245 * @param fPreAlloc Flag whether to pre-allocate the grain tables at this point.
1246 */
1247static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1248 uint64_t uStartSector, bool fPreAlloc)
1249{
1250 int rc = VINF_SUCCESS;
1251 unsigned i;
1252 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1253 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1254 size_t cbGTRounded;
1255 uint64_t cbOverhead;
1256 if (fPreAlloc)
1257 {
1258 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1259 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1260 }
1261 else
1262 {
1263 /* Use a dummy start sector for layout computation. */
1264 if (uStartSector == VMDK_GD_AT_END)
1265 uStartSector = 1;
1266 cbGTRounded = 0;
1267 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1268 }
1269 /* For streamOptimized extents there is only one grain directory,
1270 * and for all others take redundant grain directory into account. */
1271 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1272 {
1273 cbOverhead = RT_ALIGN_64(cbOverhead,
1274 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1275 }
1276 else
1277 {
1278 cbOverhead += cbGDRounded + cbGTRounded;
1279 cbOverhead = RT_ALIGN_64(cbOverhead,
1280 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1281 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1282 }
1283 if (RT_SUCCESS(rc))
1284 {
1285 pExtent->uAppendPosition = cbOverhead;
1286 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1287 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1288 {
1289 pExtent->uSectorRGD = 0;
1290 pExtent->uSectorGD = uStartSector;
1291 }
1292 else
1293 {
1294 pExtent->uSectorRGD = uStartSector;
1295 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1296 }
1297 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1298 if (RT_SUCCESS(rc))
1299 {
1300 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1301 if ( RT_SUCCESS(rc)
1302 && fPreAlloc)
1303 {
1304 uint32_t uGTSectorLE;
1305 uint64_t uOffsetSectors;
1306 if (pExtent->pRGD)
1307 {
1308 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1309 for (i = 0; i < pExtent->cGDEntries; i++)
1310 {
1311 pExtent->pRGD[i] = uOffsetSectors;
1312 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1313 /* Write the redundant grain directory entry to disk. */
1314 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1315 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1316 &uGTSectorLE, sizeof(uGTSectorLE));
1317 if (RT_FAILURE(rc))
1318 {
1319 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1320 break;
1321 }
1322 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1323 }
1324 }
1325 if (RT_SUCCESS(rc))
1326 {
1327 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1328 for (i = 0; i < pExtent->cGDEntries; i++)
1329 {
1330 pExtent->pGD[i] = uOffsetSectors;
1331 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1332 /* Write the grain directory entry to disk. */
1333 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1334 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1335 &uGTSectorLE, sizeof(uGTSectorLE));
1336 if (RT_FAILURE(rc))
1337 {
1338 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1339 break;
1340 }
1341 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1342 }
1343 }
1344 }
1345 }
1346 }
1347 if (RT_FAILURE(rc))
1348 vmdkFreeGrainDirectory(pExtent);
1349 return rc;
1350}
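/* Resulting layout sketch for the pre-allocated, non-streamOptimized case
 * (offsets grow to the right; sizes are rounded to 512 bytes and the total
 * overhead to a full grain):
 *     [descriptor][redundant GD][redundant GTs][GD][GTs][grain data...]
 * For streamOptimized extents only the single grain directory at uStartSector
 * is used and uSectorRGD stays 0. */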
1351/**
1352 * Unquotes the given string returning the result in a separate buffer.
1353 *
1354 * @returns VBox status code.
1355 * @param pImage The VMDK image state.
1356 * @param pszStr The string to unquote.
1357 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1358 * free.
1359 * @param ppszNext Where to store the pointer to any character following
1360 * the quoted value, optional.
1361 */
1362static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1363 char **ppszUnquoted, char **ppszNext)
1364{
1365 const char *pszStart = pszStr;
1366 char *pszQ;
1367 char *pszUnquoted;
1368 /* Skip over whitespace. */
1369 while (*pszStr == ' ' || *pszStr == '\t')
1370 pszStr++;
1371 if (*pszStr != '"')
1372 {
1373 pszQ = (char *)pszStr;
1374 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1375 pszQ++;
1376 }
1377 else
1378 {
1379 pszStr++;
1380 pszQ = (char *)strchr(pszStr, '"');
1381 if (pszQ == NULL)
1382 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1383 pImage->pszFilename, pszStart);
1384 }
1385 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1386 if (!pszUnquoted)
1387 return VERR_NO_MEMORY;
1388 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1389 pszUnquoted[pszQ - pszStr] = '\0';
1390 *ppszUnquoted = pszUnquoted;
1391 if (ppszNext)
1392 *ppszNext = pszQ + 1;
1393 return VINF_SUCCESS;
1394}
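/* Example (illustrative): for the input '  "disk-flat.vmdk" 123' the call
 * returns "disk-flat.vmdk" in *ppszUnquoted and, if requested, *ppszNext
 * points just past the closing quote (at ' 123'). An unquoted token is cut
 * at the first blank or tab instead. */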
1395static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1396 const char *pszLine)
1397{
1398 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1399 ssize_t cbDiff = strlen(pszLine) + 1;
1400 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1401 || pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1402 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1403 memcpy(pEnd, pszLine, cbDiff);
1404 pDescriptor->cLines++;
1405 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1406 pDescriptor->fDirty = true;
1407 return VINF_SUCCESS;
1408}
1409static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1410 const char *pszKey, const char **ppszValue)
1411{
1412 size_t cbKey = strlen(pszKey);
1413 const char *pszValue;
1414 while (uStart != 0)
1415 {
1416 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1417 {
1418 /* Key matches, check for a '=' (preceded by whitespace). */
1419 pszValue = pDescriptor->aLines[uStart] + cbKey;
1420 while (*pszValue == ' ' || *pszValue == '\t')
1421 pszValue++;
1422 if (*pszValue == '=')
1423 {
1424 *ppszValue = pszValue + 1;
1425 break;
1426 }
1427 }
1428 uStart = pDescriptor->aNextLines[uStart];
1429 }
1430 return !!uStart;
1431}
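/* Example (illustrative): given a descriptor line such as
 *     ddb.uuid.image="01234567-89ab-cdef-0123-456789abcdef"
 * vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, "ddb.uuid.image", &psz)
 * returns true and leaves psz pointing right after the '=', i.e. at the still
 * quoted value, which callers typically hand to vmdkStringUnquote(). */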
1432static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1433 unsigned uStart,
1434 const char *pszKey, const char *pszValue)
1435{
1436 char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
1437 size_t cbKey = strlen(pszKey);
1438 unsigned uLast = 0;
1439 while (uStart != 0)
1440 {
1441 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1442 {
1443 /* Key matches, check for a '=' (preceded by whitespace). */
1444 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1445 while (*pszTmp == ' ' || *pszTmp == '\t')
1446 pszTmp++;
1447 if (*pszTmp == '=')
1448 {
1449 pszTmp++;
1450 /** @todo r=bird: Doesn't skipping trailing blanks here just cause unnecessary
1451 * bloat and potentially out of space error? */
1452 while (*pszTmp == ' ' || *pszTmp == '\t')
1453 pszTmp++;
1454 break;
1455 }
1456 }
1457 if (!pDescriptor->aNextLines[uStart])
1458 uLast = uStart;
1459 uStart = pDescriptor->aNextLines[uStart];
1460 }
1461 if (uStart)
1462 {
1463 if (pszValue)
1464 {
1465 /* Key already exists, replace existing value. */
1466 size_t cbOldVal = strlen(pszTmp);
1467 size_t cbNewVal = strlen(pszValue);
1468 ssize_t cbDiff = cbNewVal - cbOldVal;
1469 /* Check for buffer overflow. */
1470 if ( pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
1471 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1472 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1473 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1474 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1475 memcpy(pszTmp, pszValue, cbNewVal + 1);
1476 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1477 pDescriptor->aLines[i] += cbDiff;
1478 }
1479 else
1480 {
1481 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1482 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1483 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1484 {
1485 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1486 if (pDescriptor->aNextLines[i])
1487 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1488 else
1489 pDescriptor->aNextLines[i-1] = 0;
1490 }
1491 pDescriptor->cLines--;
1492 /* Adjust starting line numbers of following descriptor sections. */
1493 if (uStart < pDescriptor->uFirstExtent)
1494 pDescriptor->uFirstExtent--;
1495 if (uStart < pDescriptor->uFirstDDB)
1496 pDescriptor->uFirstDDB--;
1497 }
1498 }
1499 else
1500 {
1501 /* Key doesn't exist, append after the last entry in this category. */
1502 if (!pszValue)
1503 {
1504 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1505 return VINF_SUCCESS;
1506 }
1507 cbKey = strlen(pszKey);
1508 size_t cbValue = strlen(pszValue);
1509 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1510 /* Check for buffer overflow. */
1511 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1512 || ( pDescriptor->aLines[pDescriptor->cLines]
1513 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1514 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1515 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1516 {
1517 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1518 if (pDescriptor->aNextLines[i - 1])
1519 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1520 else
1521 pDescriptor->aNextLines[i] = 0;
1522 }
1523 uStart = uLast + 1;
1524 pDescriptor->aNextLines[uLast] = uStart;
1525 pDescriptor->aNextLines[uStart] = 0;
1526 pDescriptor->cLines++;
1527 pszTmp = pDescriptor->aLines[uStart];
1528 memmove(pszTmp + cbDiff, pszTmp,
1529 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1530 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1531 pDescriptor->aLines[uStart][cbKey] = '=';
1532 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1533 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1534 pDescriptor->aLines[i] += cbDiff;
1535 /* Adjust starting line numbers of following descriptor sections. */
1536 if (uStart <= pDescriptor->uFirstExtent)
1537 pDescriptor->uFirstExtent++;
1538 if (uStart <= pDescriptor->uFirstDDB)
1539 pDescriptor->uFirstDDB++;
1540 }
1541 pDescriptor->fDirty = true;
1542 return VINF_SUCCESS;
1543}
1544static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1545 uint32_t *puValue)
1546{
1547 const char *pszValue;
1548 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1549 &pszValue))
1550 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1551 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1552}
1553/**
1554 * Returns the value of the given key as a string allocating the necessary memory.
1555 *
1556 * @returns VBox status code.
1557 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1558 * @param pImage The VMDK image state.
1559 * @param pDescriptor The descriptor to fetch the value from.
1560 * @param pszKey The key to get the value from.
1561 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1562 * free.
1563 */
1564static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1565 const char *pszKey, char **ppszValue)
1566{
1567 const char *pszValue;
1568 char *pszValueUnquoted;
1569 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1570 &pszValue))
1571 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1572 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1573 if (RT_FAILURE(rc))
1574 return rc;
1575 *ppszValue = pszValueUnquoted;
1576 return rc;
1577}
1578static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1579 const char *pszKey, const char *pszValue)
1580{
1581 char *pszValueQuoted;
1582 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1583 if (!pszValueQuoted)
1584 return VERR_NO_STR_MEMORY;
1585 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1586 pszValueQuoted);
1587 RTStrFree(pszValueQuoted);
1588 return rc;
1589}
1590static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1591 PVMDKDESCRIPTOR pDescriptor)
1592{
1593 RT_NOREF1(pImage);
1594 unsigned uEntry = pDescriptor->uFirstExtent;
1595 ssize_t cbDiff;
1596 if (!uEntry)
1597 return;
1598 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1599 /* Move everything including \0 in the entry marking the end of buffer. */
1600 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1601 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1602 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1603 {
1604 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1605 if (pDescriptor->aNextLines[i])
1606 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1607 else
1608 pDescriptor->aNextLines[i - 1] = 0;
1609 }
1610 pDescriptor->cLines--;
1611 if (pDescriptor->uFirstDDB)
1612 pDescriptor->uFirstDDB--;
1613 return;
1614}
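/**
 * Internal: appends a new extent line to the extent description section of the
 * descriptor. The layout depends on the extent type, e.g. (illustrative):
 *      RW 2048 FLAT "image-flat.vmdk" 0
 * ZERO extents carry no basename and no offset, other non-FLAT extents omit
 * the trailing sector offset.
 */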
1615static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1616 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1617 VMDKETYPE enmType, const char *pszBasename,
1618 uint64_t uSectorOffset)
1619{
1620 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1621 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1622 char *pszTmp;
1623 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1624 char szExt[1024];
1625 ssize_t cbDiff;
1626 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1627 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1628 /* Find last entry in extent description. */
1629 while (uStart)
1630 {
1631 if (!pDescriptor->aNextLines[uStart])
1632 uLast = uStart;
1633 uStart = pDescriptor->aNextLines[uStart];
1634 }
1635 if (enmType == VMDKETYPE_ZERO)
1636 {
1637 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1638 cNominalSectors, apszType[enmType]);
1639 }
1640 else if (enmType == VMDKETYPE_FLAT)
1641 {
1642 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1643 apszAccess[enmAccess], cNominalSectors,
1644 apszType[enmType], pszBasename, uSectorOffset);
1645 }
1646 else
1647 {
1648 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1649 apszAccess[enmAccess], cNominalSectors,
1650 apszType[enmType], pszBasename);
1651 }
1652 cbDiff = strlen(szExt) + 1;
1653 /* Check for buffer overflow. */
1654 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1655 || ( pDescriptor->aLines[pDescriptor->cLines]
1656 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1657 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1658 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1659 {
1660 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1661 if (pDescriptor->aNextLines[i - 1])
1662 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1663 else
1664 pDescriptor->aNextLines[i] = 0;
1665 }
1666 uStart = uLast + 1;
1667 pDescriptor->aNextLines[uLast] = uStart;
1668 pDescriptor->aNextLines[uStart] = 0;
1669 pDescriptor->cLines++;
1670 pszTmp = pDescriptor->aLines[uStart];
1671 memmove(pszTmp + cbDiff, pszTmp,
1672 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1673 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1674 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1675 pDescriptor->aLines[i] += cbDiff;
1676 /* Adjust starting line numbers of following descriptor sections. */
1677 if (uStart <= pDescriptor->uFirstDDB)
1678 pDescriptor->uFirstDDB++;
1679 pDescriptor->fDirty = true;
1680 return VINF_SUCCESS;
1681}
1682/**
1683 * Returns the value of the given key from the DDB as a string, allocating
1684 * the necessary memory.
1685 *
1686 * @returns VBox status code.
1687 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1688 * @param pImage The VMDK image state.
1689 * @param pDescriptor The descriptor to fetch the value from.
1690 * @param pszKey The key to get the value from.
1691 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1692 * free.
1693 */
1694static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1695 const char *pszKey, char **ppszValue)
1696{
1697 const char *pszValue;
1698 char *pszValueUnquoted;
1699 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1700 &pszValue))
1701 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1702 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1703 if (RT_FAILURE(rc))
1704 return rc;
1705 *ppszValue = pszValueUnquoted;
1706 return rc;
1707}
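/**
 * Internal: returns the value of the given key from the DDB section as an
 * unsigned 32-bit integer; the quoted value is unquoted before parsing.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the key could not be found.
 */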
1708static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1709 const char *pszKey, uint32_t *puValue)
1710{
1711 const char *pszValue;
1712 char *pszValueUnquoted;
1713 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1714 &pszValue))
1715 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1716 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1717 if (RT_FAILURE(rc))
1718 return rc;
1719 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1720 RTMemTmpFree(pszValueUnquoted);
1721 return rc;
1722}
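/**
 * Internal: returns the value of the given key from the DDB section as a UUID.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the key could not be found.
 */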
1723static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1724 const char *pszKey, PRTUUID pUuid)
1725{
1726 const char *pszValue;
1727 char *pszValueUnquoted;
1728 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1729 &pszValue))
1730 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1731 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1732 if (RT_FAILURE(rc))
1733 return rc;
1734 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1735 RTMemTmpFree(pszValueUnquoted);
1736 return rc;
1737}
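/**
 * Internal: sets the given key in the DDB section to the given string value,
 * adding quotes; a NULL value is passed through unquoted to vmdkDescSetStr.
 */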
1738static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1739 const char *pszKey, const char *pszVal)
1740{
1741 int rc;
1742 char *pszValQuoted;
1743 if (pszVal)
1744 {
1745 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1746 if (!pszValQuoted)
1747 return VERR_NO_STR_MEMORY;
1748 }
1749 else
1750 pszValQuoted = NULL;
1751 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1752 pszValQuoted);
1753 if (pszValQuoted)
1754 RTStrFree(pszValQuoted);
1755 return rc;
1756}
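/**
 * Internal: sets the given key in the DDB section to the given UUID,
 * formatted as a quoted string.
 */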
1757static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1758 const char *pszKey, PCRTUUID pUuid)
1759{
1760 char *pszUuid;
1761 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1762 if (!pszUuid)
1763 return VERR_NO_STR_MEMORY;
1764 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1765 pszUuid);
1766 RTStrFree(pszUuid);
1767 return rc;
1768}
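/**
 * Internal: sets the given key in the DDB section to the given unsigned
 * 32-bit integer, formatted as a quoted decimal string.
 */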
1769static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1770 const char *pszKey, uint32_t uValue)
1771{
1772 char *pszValue;
1773    RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1774 if (!pszValue)
1775 return VERR_NO_STR_MEMORY;
1776 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1777 pszValue);
1778 RTStrFree(pszValue);
1779 return rc;
1780}
1781/**
1782 * Splits the descriptor data into individual lines checking for correct line
1783 * endings and descriptor size.
1784 *
1785 * @returns VBox status code.
1786 * @param pImage The image instance.
1787 * @param pDesc The descriptor.
1788 * @param pszTmp The raw descriptor data from the image.
1789 */
1790static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1791{
1792 unsigned cLine = 0;
1793 int rc = VINF_SUCCESS;
1794 while ( RT_SUCCESS(rc)
1795 && *pszTmp != '\0')
1796 {
1797 pDesc->aLines[cLine++] = pszTmp;
1798 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1799 {
1800 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1801 rc = VERR_VD_VMDK_INVALID_HEADER;
1802 break;
1803 }
1804 while (*pszTmp != '\0' && *pszTmp != '\n')
1805 {
1806 if (*pszTmp == '\r')
1807 {
1808 if (*(pszTmp + 1) != '\n')
1809 {
1810 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1811 break;
1812 }
1813 else
1814 {
1815 /* Get rid of CR character. */
1816 *pszTmp = '\0';
1817 }
1818 }
1819 pszTmp++;
1820 }
1821 if (RT_FAILURE(rc))
1822 break;
1823 /* Get rid of LF character. */
1824 if (*pszTmp == '\n')
1825 {
1826 *pszTmp = '\0';
1827 pszTmp++;
1828 }
1829 }
1830 if (RT_SUCCESS(rc))
1831 {
1832 pDesc->cLines = cLine;
1833 /* Pointer right after the end of the used part of the buffer. */
1834 pDesc->aLines[cLine] = pszTmp;
1835 }
1836 return rc;
1837}
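/**
 * Internal: splits the raw descriptor data into lines, checks the
 * "# Disk DescriptorFile" signature and locates the header, extent
 * description and disk database sections, verifying their ordering.
 *
 * @returns VBox status code.
 */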
1838static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1839 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1840{
1841 pDescriptor->cbDescAlloc = cbDescData;
1842 int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
1843 if (RT_SUCCESS(rc))
1844 {
1845 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1846 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
1847 && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
1848 && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
1849 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1850 N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1851 else
1852 {
1853 unsigned uLastNonEmptyLine = 0;
1854 /* Initialize those, because we need to be able to reopen an image. */
1855 pDescriptor->uFirstDesc = 0;
1856 pDescriptor->uFirstExtent = 0;
1857 pDescriptor->uFirstDDB = 0;
1858 for (unsigned i = 0; i < pDescriptor->cLines; i++)
1859 {
1860 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1861 {
1862 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1863 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1864 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1865 {
1866 /* An extent descriptor. */
1867 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1868 {
1869 /* Incorrect ordering of entries. */
1870 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1871 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1872 break;
1873 }
1874 if (!pDescriptor->uFirstExtent)
1875 {
1876 pDescriptor->uFirstExtent = i;
1877 uLastNonEmptyLine = 0;
1878 }
1879 }
1880 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1881 {
1882 /* A disk database entry. */
1883 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1884 {
1885 /* Incorrect ordering of entries. */
1886 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1887 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1888 break;
1889 }
1890 if (!pDescriptor->uFirstDDB)
1891 {
1892 pDescriptor->uFirstDDB = i;
1893 uLastNonEmptyLine = 0;
1894 }
1895 }
1896 else
1897 {
1898 /* A normal entry. */
1899 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1900 {
1901 /* Incorrect ordering of entries. */
1902 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1903 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1904 break;
1905 }
1906 if (!pDescriptor->uFirstDesc)
1907 {
1908 pDescriptor->uFirstDesc = i;
1909 uLastNonEmptyLine = 0;
1910 }
1911 }
1912 if (uLastNonEmptyLine)
1913 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1914 uLastNonEmptyLine = i;
1915 }
1916 }
1917 }
1918 }
1919 return rc;
1920}
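/**
 * Internal: stores the physical CHS geometry (cylinders/heads/sectors) in the
 * descriptor DDB section.
 */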
1921static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1922 PCVDGEOMETRY pPCHSGeometry)
1923{
1924 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1925 VMDK_DDB_GEO_PCHS_CYLINDERS,
1926 pPCHSGeometry->cCylinders);
1927 if (RT_FAILURE(rc))
1928 return rc;
1929 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1930 VMDK_DDB_GEO_PCHS_HEADS,
1931 pPCHSGeometry->cHeads);
1932 if (RT_FAILURE(rc))
1933 return rc;
1934 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1935 VMDK_DDB_GEO_PCHS_SECTORS,
1936 pPCHSGeometry->cSectors);
1937 return rc;
1938}
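/**
 * Internal: stores the logical CHS geometry (cylinders/heads/sectors) in the
 * descriptor DDB section.
 */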
1939static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1940 PCVDGEOMETRY pLCHSGeometry)
1941{
1942 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1943 VMDK_DDB_GEO_LCHS_CYLINDERS,
1944 pLCHSGeometry->cCylinders);
1945 if (RT_FAILURE(rc))
1946 return rc;
1947 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1948 VMDK_DDB_GEO_LCHS_HEADS,
1949 pLCHSGeometry->cHeads);
1950 if (RT_FAILURE(rc))
1951 return rc;
1952 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1953 VMDK_DDB_GEO_LCHS_SECTORS,
1954 pLCHSGeometry->cSectors);
1955 return rc;
1956}
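/**
 * Internal: initializes a fresh descriptor in the given buffer, creating the
 * header, extent description and disk database sections with their default
 * entries. The resulting skeleton looks roughly like this (illustrative; the
 * CID is a random 32-bit value):
 *      # Disk DescriptorFile
 *      version=1
 *      CID=9460fed3
 *      parentCID=ffffffff
 *
 *      # Extent description
 *      NOACCESS 0 ZERO
 *
 *      # The disk Data Base
 *      #DDB
 *
 *      ddb.virtualHWVersion = "4"
 *      ddb.adapterType="ide"
 */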
1957static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1958 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1959{
1960 pDescriptor->uFirstDesc = 0;
1961 pDescriptor->uFirstExtent = 0;
1962 pDescriptor->uFirstDDB = 0;
1963 pDescriptor->cLines = 0;
1964 pDescriptor->cbDescAlloc = cbDescData;
1965 pDescriptor->fDirty = false;
1966 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1967 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1968 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1969 if (RT_SUCCESS(rc))
1970 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1971 if (RT_SUCCESS(rc))
1972 {
1973 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1974 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1975 }
1976 if (RT_SUCCESS(rc))
1977 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1978 if (RT_SUCCESS(rc))
1979 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1980 if (RT_SUCCESS(rc))
1981 {
1982 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
1983 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1984 }
1985 if (RT_SUCCESS(rc))
1986 {
1987 /* The trailing space is created by VMware, too. */
1988 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
1989 }
1990 if (RT_SUCCESS(rc))
1991 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
1992 if (RT_SUCCESS(rc))
1993 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1994 if (RT_SUCCESS(rc))
1995 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
1996 if (RT_SUCCESS(rc))
1997 {
1998 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
1999 /* Now that the framework is in place, use the normal functions to insert
2000 * the remaining keys. */
2001 char szBuf[9];
2002 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2003 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2004 "CID", szBuf);
2005 }
2006 if (RT_SUCCESS(rc))
2007 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2008 "parentCID", "ffffffff");
2009 if (RT_SUCCESS(rc))
2010 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2011 return rc;
2012}
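/**
 * Internal: parses a complete descriptor, setting up the extent array, the
 * image flags, the geometry and the UUIDs in the image state from its contents.
 *
 * @returns VBox status code.
 */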
2013static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
2014{
2015 int rc;
2016 unsigned cExtents;
2017 unsigned uLine;
2018 unsigned i;
2019 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2020 &pImage->Descriptor);
2021 if (RT_FAILURE(rc))
2022 return rc;
2023 /* Check version, must be 1. */
2024 uint32_t uVersion;
2025 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2026 if (RT_FAILURE(rc))
2027 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2028 if (uVersion != 1)
2029 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2030 /* Get image creation type and determine image flags. */
2031 char *pszCreateType = NULL; /* initialized to make gcc shut up */
2032 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2033 &pszCreateType);
2034 if (RT_FAILURE(rc))
2035 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2036 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2037 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2038 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2039 else if ( !strcmp(pszCreateType, "partitionedDevice")
2040 || !strcmp(pszCreateType, "fullDevice"))
2041 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2042 else if (!strcmp(pszCreateType, "streamOptimized"))
2043 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2044 else if (!strcmp(pszCreateType, "vmfs"))
2045 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2046 RTMemTmpFree(pszCreateType);
2047 /* Count the number of extent config entries. */
2048 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2049 uLine != 0;
2050 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2051 /* nothing */;
2052 if (!pImage->pDescData && cExtents != 1)
2053 {
2054 /* Monolithic image, must have only one extent (already opened). */
2055 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2056 }
2057 if (pImage->pDescData)
2058 {
2059 /* Non-monolithic image, extents need to be allocated. */
2060 rc = vmdkCreateExtents(pImage, cExtents);
2061 if (RT_FAILURE(rc))
2062 return rc;
2063 }
2064 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2065 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2066 {
2067 char *pszLine = pImage->Descriptor.aLines[uLine];
2068 /* Access type of the extent. */
2069 if (!strncmp(pszLine, "RW", 2))
2070 {
2071 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2072 pszLine += 2;
2073 }
2074 else if (!strncmp(pszLine, "RDONLY", 6))
2075 {
2076 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2077 pszLine += 6;
2078 }
2079 else if (!strncmp(pszLine, "NOACCESS", 8))
2080 {
2081 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2082 pszLine += 8;
2083 }
2084 else
2085 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2086 if (*pszLine++ != ' ')
2087 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2088 /* Nominal size of the extent. */
2089 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2090 &pImage->pExtents[i].cNominalSectors);
2091 if (RT_FAILURE(rc))
2092 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2093 if (*pszLine++ != ' ')
2094 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2095 /* Type of the extent. */
2096 if (!strncmp(pszLine, "SPARSE", 6))
2097 {
2098 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2099 pszLine += 6;
2100 }
2101 else if (!strncmp(pszLine, "FLAT", 4))
2102 {
2103 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2104 pszLine += 4;
2105 }
2106 else if (!strncmp(pszLine, "ZERO", 4))
2107 {
2108 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2109 pszLine += 4;
2110 }
2111 else if (!strncmp(pszLine, "VMFS", 4))
2112 {
2113 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2114 pszLine += 4;
2115 }
2116 else
2117 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2118 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2119 {
2120 /* This one has no basename or offset. */
2121 if (*pszLine == ' ')
2122 pszLine++;
2123 if (*pszLine != '\0')
2124 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2125 pImage->pExtents[i].pszBasename = NULL;
2126 }
2127 else
2128 {
2129 /* All other extent types have basename and optional offset. */
2130 if (*pszLine++ != ' ')
2131 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2132 /* Basename of the image. Surrounded by quotes. */
2133 char *pszBasename;
2134 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2135 if (RT_FAILURE(rc))
2136 return rc;
2137 pImage->pExtents[i].pszBasename = pszBasename;
2138 if (*pszLine == ' ')
2139 {
2140 pszLine++;
2141 if (*pszLine != '\0')
2142 {
2143 /* Optional offset in extent specified. */
2144 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2145 &pImage->pExtents[i].uSectorOffset);
2146 if (RT_FAILURE(rc))
2147 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2148 }
2149 }
2150 if (*pszLine != '\0')
2151 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2152 }
2153 }
2154 /* Determine PCHS geometry (autogenerate if necessary). */
2155 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2156 VMDK_DDB_GEO_PCHS_CYLINDERS,
2157 &pImage->PCHSGeometry.cCylinders);
2158 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2159 pImage->PCHSGeometry.cCylinders = 0;
2160 else if (RT_FAILURE(rc))
2161 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2162 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2163 VMDK_DDB_GEO_PCHS_HEADS,
2164 &pImage->PCHSGeometry.cHeads);
2165 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2166 pImage->PCHSGeometry.cHeads = 0;
2167 else if (RT_FAILURE(rc))
2168 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2169 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2170 VMDK_DDB_GEO_PCHS_SECTORS,
2171 &pImage->PCHSGeometry.cSectors);
2172 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2173 pImage->PCHSGeometry.cSectors = 0;
2174 else if (RT_FAILURE(rc))
2175 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2176 if ( pImage->PCHSGeometry.cCylinders == 0
2177 || pImage->PCHSGeometry.cHeads == 0
2178 || pImage->PCHSGeometry.cHeads > 16
2179 || pImage->PCHSGeometry.cSectors == 0
2180 || pImage->PCHSGeometry.cSectors > 63)
2181 {
2182 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2183 * as the total image size isn't known yet). */
2184 pImage->PCHSGeometry.cCylinders = 0;
2185 pImage->PCHSGeometry.cHeads = 16;
2186 pImage->PCHSGeometry.cSectors = 63;
2187 }
2188 /* Determine LCHS geometry (set to 0 if not specified). */
2189 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2190 VMDK_DDB_GEO_LCHS_CYLINDERS,
2191 &pImage->LCHSGeometry.cCylinders);
2192 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2193 pImage->LCHSGeometry.cCylinders = 0;
2194 else if (RT_FAILURE(rc))
2195 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2196 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2197 VMDK_DDB_GEO_LCHS_HEADS,
2198 &pImage->LCHSGeometry.cHeads);
2199 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2200 pImage->LCHSGeometry.cHeads = 0;
2201 else if (RT_FAILURE(rc))
2202 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2203 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2204 VMDK_DDB_GEO_LCHS_SECTORS,
2205 &pImage->LCHSGeometry.cSectors);
2206 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2207 pImage->LCHSGeometry.cSectors = 0;
2208 else if (RT_FAILURE(rc))
2209 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2210 if ( pImage->LCHSGeometry.cCylinders == 0
2211 || pImage->LCHSGeometry.cHeads == 0
2212 || pImage->LCHSGeometry.cSectors == 0)
2213 {
2214 pImage->LCHSGeometry.cCylinders = 0;
2215 pImage->LCHSGeometry.cHeads = 0;
2216 pImage->LCHSGeometry.cSectors = 0;
2217 }
2218 /* Get image UUID. */
2219 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2220 &pImage->ImageUuid);
2221 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2222 {
2223 /* Image without UUID. Probably created by VMware and not yet used
2224 * by VirtualBox. Can only be added for images opened in read/write
2225 * mode, so don't bother producing a sensible UUID otherwise. */
2226 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2227 RTUuidClear(&pImage->ImageUuid);
2228 else
2229 {
2230 rc = RTUuidCreate(&pImage->ImageUuid);
2231 if (RT_FAILURE(rc))
2232 return rc;
2233 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2234 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2235 if (RT_FAILURE(rc))
2236 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2237 }
2238 }
2239 else if (RT_FAILURE(rc))
2240 return rc;
2241 /* Get image modification UUID. */
2242 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2243 VMDK_DDB_MODIFICATION_UUID,
2244 &pImage->ModificationUuid);
2245 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2246 {
2247        /* Image without a modification UUID. Probably created by VMware and not yet used
2248 * by VirtualBox. Can only be added for images opened in read/write
2249 * mode, so don't bother producing a sensible UUID otherwise. */
2250 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2251 RTUuidClear(&pImage->ModificationUuid);
2252 else
2253 {
2254 rc = RTUuidCreate(&pImage->ModificationUuid);
2255 if (RT_FAILURE(rc))
2256 return rc;
2257 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2258 VMDK_DDB_MODIFICATION_UUID,
2259 &pImage->ModificationUuid);
2260 if (RT_FAILURE(rc))
2261 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2262 }
2263 }
2264 else if (RT_FAILURE(rc))
2265 return rc;
2266 /* Get UUID of parent image. */
2267 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2268 &pImage->ParentUuid);
2269 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2270 {
2271        /* Image without a parent UUID. Probably created by VMware and not yet used
2272 * by VirtualBox. Can only be added for images opened in read/write
2273 * mode, so don't bother producing a sensible UUID otherwise. */
2274 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2275 RTUuidClear(&pImage->ParentUuid);
2276 else
2277 {
2278 rc = RTUuidClear(&pImage->ParentUuid);
2279 if (RT_FAILURE(rc))
2280 return rc;
2281 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2282 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2283 if (RT_FAILURE(rc))
2284 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2285 }
2286 }
2287 else if (RT_FAILURE(rc))
2288 return rc;
2289 /* Get parent image modification UUID. */
2290 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2291 VMDK_DDB_PARENT_MODIFICATION_UUID,
2292 &pImage->ParentModificationUuid);
2293 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2294 {
2295        /* Image without a parent modification UUID. Probably created by VMware and not yet used
2296 * by VirtualBox. Can only be added for images opened in read/write
2297 * mode, so don't bother producing a sensible UUID otherwise. */
2298 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2299 RTUuidClear(&pImage->ParentModificationUuid);
2300 else
2301 {
2302 RTUuidClear(&pImage->ParentModificationUuid);
2303 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2304 VMDK_DDB_PARENT_MODIFICATION_UUID,
2305 &pImage->ParentModificationUuid);
2306 if (RT_FAILURE(rc))
2307 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2308 }
2309 }
2310 else if (RT_FAILURE(rc))
2311 return rc;
2312 return VINF_SUCCESS;
2313}
2314/**
2315 * Internal: Prepares the descriptor for writing to the image.
2316 */
2317static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2318 void **ppvData, size_t *pcbData)
2319{
2320 int rc = VINF_SUCCESS;
2321 /*
2322 * Allocate temporary descriptor buffer.
2323 * If there is no limit, allocate a default size
2324 * and increase it if required.
2325 */
2326 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2327 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2328 size_t offDescriptor = 0;
2329 if (!pszDescriptor)
2330 return VERR_NO_MEMORY;
2331 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2332 {
2333 const char *psz = pImage->Descriptor.aLines[i];
2334 size_t cb = strlen(psz);
2335 /*
2336 * Increase the descriptor if there is no limit and
2337 * there is not enough room left for this line.
2338 */
2339 if (offDescriptor + cb + 1 > cbDescriptor)
2340 {
2341 if (cbLimit)
2342 {
2343 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2344 break;
2345 }
2346 else
2347 {
2348 char *pszDescriptorNew = NULL;
2349 LogFlow(("Increasing descriptor cache\n"));
2350 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2351 if (!pszDescriptorNew)
2352 {
2353 rc = VERR_NO_MEMORY;
2354 break;
2355 }
2356 pszDescriptor = pszDescriptorNew;
2357 cbDescriptor += cb + 4 * _1K;
2358 }
2359 }
2360 if (cb > 0)
2361 {
2362 memcpy(pszDescriptor + offDescriptor, psz, cb);
2363 offDescriptor += cb;
2364 }
2365 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2366 offDescriptor++;
2367 }
2368 if (RT_SUCCESS(rc))
2369 {
2370 *ppvData = pszDescriptor;
2371 *pcbData = offDescriptor;
2372 }
2373 else if (pszDescriptor)
2374 RTMemFree(pszDescriptor);
2375 return rc;
2376}
2377/**
2378 * Internal: write/update the descriptor part of the image.
2379 */
2380static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2381{
2382 int rc = VINF_SUCCESS;
2383 uint64_t cbLimit;
2384 uint64_t uOffset;
2385 PVMDKFILE pDescFile;
2386 void *pvDescriptor = NULL;
2387 size_t cbDescriptor;
2388 if (pImage->pDescData)
2389 {
2390 /* Separate descriptor file. */
2391 uOffset = 0;
2392 cbLimit = 0;
2393 pDescFile = pImage->pFile;
2394 }
2395 else
2396 {
2397 /* Embedded descriptor file. */
2398 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2399 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2400 pDescFile = pImage->pExtents[0].pFile;
2401 }
2402 /* Bail out if there is no file to write to. */
2403 if (pDescFile == NULL)
2404 return VERR_INVALID_PARAMETER;
2405 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2406 if (RT_SUCCESS(rc))
2407 {
2408 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2409 uOffset, pvDescriptor,
2410 cbLimit ? cbLimit : cbDescriptor,
2411 pIoCtx, NULL, NULL);
2412 if ( RT_FAILURE(rc)
2413 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2414 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2415 }
2416 if (RT_SUCCESS(rc) && !cbLimit)
2417 {
2418 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2419 if (RT_FAILURE(rc))
2420 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2421 }
2422 if (RT_SUCCESS(rc))
2423 pImage->Descriptor.fDirty = false;
2424 if (pvDescriptor)
2425 RTMemFree(pvDescriptor);
2426 return rc;
2427}
2428/**
2429 * Internal: validate the consistency check values in a binary header.
2430 */
2431static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2432{
2433 int rc = VINF_SUCCESS;
2434 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2435 {
2436 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2437 return rc;
2438 }
2439 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2440 {
2441 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2442 return rc;
2443 }
2444 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2445 && ( pHeader->singleEndLineChar != '\n'
2446 || pHeader->nonEndLineChar != ' '
2447 || pHeader->doubleEndLineChar1 != '\r'
2448 || pHeader->doubleEndLineChar2 != '\n') )
2449 {
2450 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2451 return rc;
2452 }
2453 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2454 {
2455 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2456                    RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX, pExtent->pszFullname);
2457 return rc;
2458 }
2459 return rc;
2460}
2461/**
2462 * Internal: read metadata belonging to an extent with binary header, i.e.
2463 * as found in monolithic files.
2464 */
2465static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2466 bool fMagicAlreadyRead)
2467{
2468 SparseExtentHeader Header;
2469 int rc;
2470 if (!fMagicAlreadyRead)
2471 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2472 &Header, sizeof(Header));
2473 else
2474 {
2475 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2476 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2477 RT_UOFFSETOF(SparseExtentHeader, version),
2478 &Header.version,
2479 sizeof(Header)
2480 - RT_UOFFSETOF(SparseExtentHeader, version));
2481 }
2482 if (RT_SUCCESS(rc))
2483 {
2484 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2485 if (RT_SUCCESS(rc))
2486 {
2487 uint64_t cbFile = 0;
2488 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2489 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2490 pExtent->fFooter = true;
2491 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2492 || ( pExtent->fFooter
2493 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2494 {
2495 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2496 if (RT_FAILURE(rc))
2497 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2498 }
2499 if (RT_SUCCESS(rc))
2500 {
2501 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2502 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2503 if ( pExtent->fFooter
2504 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2505 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2506 {
2507 /* Read the footer, which comes before the end-of-stream marker. */
2508 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2509 cbFile - 2*512, &Header,
2510 sizeof(Header));
2511 if (RT_FAILURE(rc))
2512 {
2513 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2514 rc = VERR_VD_VMDK_INVALID_HEADER;
2515 }
2516 if (RT_SUCCESS(rc))
2517 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2518 /* Prohibit any writes to this extent. */
2519 pExtent->uAppendPosition = 0;
2520 }
2521 if (RT_SUCCESS(rc))
2522 {
2523 pExtent->uVersion = RT_LE2H_U32(Header.version);
2524 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2525 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2526 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2527 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2528 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2529 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2530 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2531 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2532 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2533 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2534 {
2535 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2536 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2537 }
2538 else
2539 {
2540 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2541 pExtent->uSectorRGD = 0;
2542 }
2543 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2544 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2545 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2546 if ( RT_SUCCESS(rc)
2547 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2548 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2549 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2550 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2551 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2552 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2553 if (RT_SUCCESS(rc))
2554 {
2555 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2556 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2557 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2558 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2559 else
2560 {
2561 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2562 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2563 /* Fix up the number of descriptor sectors, as some flat images have
2564 * really just one, and this causes failures when inserting the UUID
2565 * values and other extra information. */
2566 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2567 {
2568 /* Do it the easy way - just fix it for flat images which have no
2569 * other complicated metadata which needs space too. */
2570 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2571 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2572 pExtent->cDescriptorSectors = 4;
2573 }
2574 }
2575 }
2576 }
2577 }
2578 }
2579 }
2580 else
2581 {
2582 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2583 rc = VERR_VD_VMDK_INVALID_HEADER;
2584 }
2585 if (RT_FAILURE(rc))
2586 vmdkFreeExtentData(pImage, pExtent, false);
2587 return rc;
2588}
2589/**
2590 * Internal: read additional metadata belonging to an extent. For those
2591 * extents which have no additional metadata, just verify the information.
2592 */
2593static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2594{
2595 int rc = VINF_SUCCESS;
2596/* The check is disabled as there are too many truncated VMDK images out there. */
2597#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2598 uint64_t cbExtentSize;
2599 /* The image must be a multiple of a sector in size and contain the data
2600 * area (flat images only). If not, it means the image is at least
2601 * truncated, or even seriously garbled. */
2602 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2603 if (RT_FAILURE(rc))
2604 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2605 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2606 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2607 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2608 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2609#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2610 if ( RT_SUCCESS(rc)
2611 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2612 {
2613 /* The spec says that this must be a power of two and greater than 8,
2614 * but probably they meant not less than 8. */
2615 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2616 || pExtent->cSectorsPerGrain < 8)
2617 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2618 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2619 else
2620 {
2621 /* This code requires that a grain table must hold a power of two multiple
2622 * of the number of entries per GT cache entry. */
2623 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2624 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2625 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2626 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2627 else
2628 {
2629 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2630 if (RT_SUCCESS(rc))
2631 {
2632 /* Prohibit any writes to this streamOptimized extent. */
2633 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2634 pExtent->uAppendPosition = 0;
2635 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2636 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2637 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2638 rc = vmdkReadGrainDirectory(pImage, pExtent);
2639 else
2640 {
2641 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2642 pExtent->cbGrainStreamRead = 0;
2643 }
2644 }
2645 }
2646 }
2647 }
2648 if (RT_FAILURE(rc))
2649 vmdkFreeExtentData(pImage, pExtent, false);
2650 return rc;
2651}
2652/**
2653 * Internal: write/update the metadata for a sparse extent.
2654 */
2655static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2656 uint64_t uOffset, PVDIOCTX pIoCtx)
2657{
2658 SparseExtentHeader Header;
2659 memset(&Header, '\0', sizeof(Header));
2660 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2661 Header.version = RT_H2LE_U32(pExtent->uVersion);
2662 Header.flags = RT_H2LE_U32(RT_BIT(0));
2663 if (pExtent->pRGD)
2664 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2665 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2666 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2667 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2668 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2669 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2670 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2671 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2672 if (pExtent->fFooter && uOffset == 0)
2673 {
2674 if (pExtent->pRGD)
2675 {
2676 Assert(pExtent->uSectorRGD);
2677 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2678 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2679 }
2680 else
2681 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2682 }
2683 else
2684 {
2685 if (pExtent->pRGD)
2686 {
2687 Assert(pExtent->uSectorRGD);
2688 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2689 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2690 }
2691 else
2692 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2693 }
2694 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2695 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2696 Header.singleEndLineChar = '\n';
2697 Header.nonEndLineChar = ' ';
2698 Header.doubleEndLineChar1 = '\r';
2699 Header.doubleEndLineChar2 = '\n';
2700 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2701 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2702 uOffset, &Header, sizeof(Header),
2703 pIoCtx, NULL, NULL);
2704 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2705 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2706 return rc;
2707}
2708/**
2709 * Internal: free the buffers used for streamOptimized images.
2710 */
2711static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2712{
2713 if (pExtent->pvCompGrain)
2714 {
2715 RTMemFree(pExtent->pvCompGrain);
2716 pExtent->pvCompGrain = NULL;
2717 }
2718 if (pExtent->pvGrain)
2719 {
2720 RTMemFree(pExtent->pvGrain);
2721 pExtent->pvGrain = NULL;
2722 }
2723}
2724/**
2725 * Internal: free the memory used by the extent data structure, optionally
2726 * deleting the referenced files.
2727 *
2728 * @returns VBox status code.
2729 * @param pImage Pointer to the image instance data.
2730 * @param pExtent The extent to free.
2731 * @param fDelete Flag whether to delete the backing storage.
2732 */
2733static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2734 bool fDelete)
2735{
2736 int rc = VINF_SUCCESS;
2737 vmdkFreeGrainDirectory(pExtent);
2738 if (pExtent->pDescData)
2739 {
2740 RTMemFree(pExtent->pDescData);
2741 pExtent->pDescData = NULL;
2742 }
2743 if (pExtent->pFile != NULL)
2744 {
2745 /* Do not delete raw extents, these have full and base names equal. */
2746 rc = vmdkFileClose(pImage, &pExtent->pFile,
2747 fDelete
2748 && pExtent->pszFullname
2749 && pExtent->pszBasename
2750 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2751 }
2752 if (pExtent->pszBasename)
2753 {
2754 RTMemTmpFree((void *)pExtent->pszBasename);
2755 pExtent->pszBasename = NULL;
2756 }
2757 if (pExtent->pszFullname)
2758 {
2759 RTStrFree((char *)(void *)pExtent->pszFullname);
2760 pExtent->pszFullname = NULL;
2761 }
2762 vmdkFreeStreamBuffers(pExtent);
2763 return rc;
2764}
2765/**
2766 * Internal: allocate grain table cache if necessary for this image.
2767 */
2768static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2769{
2770 PVMDKEXTENT pExtent;
2771 /* Allocate grain table cache if any sparse extent is present. */
2772 for (unsigned i = 0; i < pImage->cExtents; i++)
2773 {
2774 pExtent = &pImage->pExtents[i];
2775 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2776 {
2777 /* Allocate grain table cache. */
2778 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2779 if (!pImage->pGTCache)
2780 return VERR_NO_MEMORY;
2781 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2782 {
2783 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2784 pGCE->uExtent = UINT32_MAX;
2785 }
2786 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2787 break;
2788 }
2789 }
2790 return VINF_SUCCESS;
2791}
2792/**
2793 * Internal: allocate the given number of extents.
2794 */
2795static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2796{
2797 int rc = VINF_SUCCESS;
2798 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2799 if (pExtents)
2800 {
2801 for (unsigned i = 0; i < cExtents; i++)
2802 {
2803 pExtents[i].pFile = NULL;
2804 pExtents[i].pszBasename = NULL;
2805 pExtents[i].pszFullname = NULL;
2806 pExtents[i].pGD = NULL;
2807 pExtents[i].pRGD = NULL;
2808 pExtents[i].pDescData = NULL;
2809 pExtents[i].uVersion = 1;
2810 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2811 pExtents[i].uExtent = i;
2812 pExtents[i].pImage = pImage;
2813 }
2814 pImage->pExtents = pExtents;
2815 pImage->cExtents = cExtents;
2816 }
2817 else
2818 rc = VERR_NO_MEMORY;
2819 return rc;
2820}
2821/**
2822 * Internal: allocate and describes an additional, file-backed extent
2823 * for the given size. Preserves original extents.
2824 */
2825static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
2826{
2827 int rc = VINF_SUCCESS;
2828 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
2829 if (pNewExtents)
2830 {
2831 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
2832 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
2833
2834 pExtent->pFile = NULL;
2835 pExtent->pszBasename = NULL;
2836 pExtent->pszFullname = NULL;
2837 pExtent->pGD = NULL;
2838 pExtent->pRGD = NULL;
2839 pExtent->pDescData = NULL;
2840 pExtent->uVersion = 1;
2841 pExtent->uCompression = VMDK_COMPRESSION_NONE;
2842 pExtent->uExtent = pImage->cExtents;
2843 pExtent->pImage = pImage;
2844 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
2845 pExtent->enmType = VMDKETYPE_FLAT;
2846 pExtent->enmAccess = VMDKACCESS_READWRITE;
2847 pExtent->uSectorOffset = 0;
2848
2849 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
2850 AssertPtr(pszBasenameSubstr);
2851
2852 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
2853 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
2854 RTPathStripSuffix(pszBasenameBase);
2855 char *pszTmp;
2856 size_t cbTmp;
2857
2858 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
2859 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
2860 pExtent->uExtent + 1, pszBasenameSuff);
2861 else
2862 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
2863 pszBasenameSuff);
2864
2865 RTStrFree(pszBasenameBase);
2866 if (!pszTmp)
2867 return VERR_NO_STR_MEMORY;
2868 cbTmp = strlen(pszTmp) + 1;
2869 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
2870 if (!pszBasename)
2871 {
2872 RTStrFree(pszTmp);
2873 return VERR_NO_MEMORY;
2874 }
2875
2876 memcpy(pszBasename, pszTmp, cbTmp);
2877 RTStrFree(pszTmp);
2878
2879 pExtent->pszBasename = pszBasename;
2880
2881 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
2882 if (!pszBasedirectory)
2883 return VERR_NO_STR_MEMORY;
2884 RTPathStripFilename(pszBasedirectory);
2885 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
2886 RTStrFree(pszBasedirectory);
2887 if (!pszFullname)
2888 return VERR_NO_STR_MEMORY;
2889 pExtent->pszFullname = pszFullname;
2890
2891 /* Create file for extent. */
2892 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
2893 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
2894 true /* fCreate */));
2895 if (RT_FAILURE(rc))
2896 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
2897
2898 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
2899 pExtent->cNominalSectors, pExtent->enmType,
2900 pExtent->pszBasename, pExtent->uSectorOffset);
2901 if (RT_FAILURE(rc))
2902 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
2903
2904 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
2905 0 /* fFlags */, NULL, 0, 0);
2906
2907 if (RT_FAILURE(rc))
2908 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
2909
2910 pImage->pExtents = pNewExtents;
2911 pImage->cExtents++;
2912 }
2913 else
2914 rc = VERR_NO_MEMORY;
2915 return rc;
2916}
2917/**
2918 * Reads and processes the descriptor embedded in sparse images.
2919 *
2920 * @returns VBox status code.
2921 * @param pImage VMDK image instance.
2922 * @param pFile The sparse file handle.
2923 */
2924static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
2925{
2926 /* It's a hosted single-extent image. */
2927 int rc = vmdkCreateExtents(pImage, 1);
2928 if (RT_SUCCESS(rc))
2929 {
2930 /* The opened file is passed to the extent. No separate descriptor
2931 * file, so no need to keep anything open for the image. */
2932 PVMDKEXTENT pExtent = &pImage->pExtents[0];
2933 pExtent->pFile = pFile;
2934 pImage->pFile = NULL;
2935 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
2936 if (RT_LIKELY(pExtent->pszFullname))
2937 {
2938 /* As we're dealing with a monolithic image here, there must
2939 * be a descriptor embedded in the image file. */
2940 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
2941 if ( RT_SUCCESS(rc)
2942 && pExtent->uDescriptorSector
2943 && pExtent->cDescriptorSectors)
2944 {
2945 /* HACK: extend the descriptor if it is unusually small and it fits in
2946 * the unused space after the image header. Allows opening VMDK files
2947             * with an extremely small descriptor in read/write mode.
2948 *
2949 * The previous version introduced a possible regression for VMDK stream
2950             * optimized images from VMware, which tend to have only a single-sector-sized
2951             * descriptor. Increasing the descriptor size resulted in adding the various UUID
2952             * entries required to make it work with VBox, but for stream optimized images
2953             * the updated binary header wasn't written to the disk, creating a mismatch
2954             * between the advertised and the real descriptor size.
2955 *
2956             * The descriptor size will now be increased even if the image is opened readonly,
2957             * provided there is enough room, but the new value will not be written back to the image.
2958 */
2959 if ( pExtent->cDescriptorSectors < 3
2960 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
2961 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
2962 {
2963 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
2964 pExtent->cDescriptorSectors = 4;
2965 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2966 {
2967 /*
2968 * Update the on disk number now to make sure we don't introduce inconsistencies
2969 * in case of stream optimized images from VMware where the descriptor is just
2970 * one sector big (the binary header is not written to disk for complete
2971 * stream optimized images in vmdkFlushImage()).
2972 */
2973 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
2974 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
2975 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
2976 &u64DescSizeNew, sizeof(u64DescSizeNew));
2977 if (RT_FAILURE(rc))
2978 {
2979 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
2980 /* Restore the old size and carry on. */
2981 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
2982 }
2983 }
2984 }
2985 /* Read the descriptor from the extent. */
2986 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2987 if (RT_LIKELY(pExtent->pDescData))
2988 {
2989 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2990 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
2991 pExtent->pDescData,
2992 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2993 if (RT_SUCCESS(rc))
2994 {
2995 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
2996 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
2997 if ( RT_SUCCESS(rc)
2998 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2999 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3000 {
3001 rc = vmdkReadMetaExtent(pImage, pExtent);
3002 if (RT_SUCCESS(rc))
3003 {
3004 /* Mark the extent as unclean if opened in read-write mode. */
3005 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3006 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3007 {
3008 pExtent->fUncleanShutdown = true;
3009 pExtent->fMetaDirty = true;
3010 }
3011 }
3012 }
3013 else if (RT_SUCCESS(rc))
3014 rc = VERR_NOT_SUPPORTED;
3015 }
3016 else
3017 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3018 }
3019 else
3020 rc = VERR_NO_MEMORY;
3021 }
3022 else if (RT_SUCCESS(rc))
3023 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3024 }
3025 else
3026 rc = VERR_NO_MEMORY;
3027 }
3028 return rc;
3029}
3030/**
3031 * Reads the descriptor from a pure text file.
3032 *
3033 * @returns VBox status code.
3034 * @param pImage VMDK image instance.
3035 * @param pFile The descriptor file handle.
3036 */
3037static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3038{
3039 /* Allocate at least 10K, and make sure that there is 5K free space
3040 * in case new entries need to be added to the descriptor. Never
3041     * allocate more than 128K, because no valid descriptor file is that large,
3042     * and the limit will result in the correct "truncated read" error handling. */
3043 uint64_t cbFileSize;
3044 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3045 if ( RT_SUCCESS(rc)
3046 && cbFileSize >= 50)
3047 {
3048 uint64_t cbSize = cbFileSize;
3049 if (cbSize % VMDK_SECTOR2BYTE(10))
3050 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3051 else
3052 cbSize += VMDK_SECTOR2BYTE(10);
3053 cbSize = RT_MIN(cbSize, _128K);
3054 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3055 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3056 if (RT_LIKELY(pImage->pDescData))
3057 {
3058 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3059 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3060 if (RT_SUCCESS(rc))
3061 {
3062#if 0 /** @todo Revisit */
3063 cbRead += sizeof(u32Magic);
3064 if (cbRead == pImage->cbDescAlloc)
3065 {
3066 /* Likely the read is truncated. Better fail a bit too early
3067 * (normally the descriptor is much smaller than our buffer). */
3068 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3069 goto out;
3070 }
3071#endif
3072 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3073 pImage->cbDescAlloc);
3074 if (RT_SUCCESS(rc))
3075 {
3076 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3077 {
3078 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3079 if (pExtent->pszBasename)
3080 {
3081 /* Hack to figure out whether the specified name in the
3082 * extent descriptor is absolute. Doesn't always work, but
3083 * should be good enough for now. */
3084 char *pszFullname;
3085 /** @todo implement proper path absolute check. */
3086 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3087 {
3088 pszFullname = RTStrDup(pExtent->pszBasename);
3089 if (!pszFullname)
3090 {
3091 rc = VERR_NO_MEMORY;
3092 break;
3093 }
3094 }
3095 else
3096 {
3097 char *pszDirname = RTStrDup(pImage->pszFilename);
3098 if (!pszDirname)
3099 {
3100 rc = VERR_NO_MEMORY;
3101 break;
3102 }
3103 RTPathStripFilename(pszDirname);
3104 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3105 RTStrFree(pszDirname);
3106 if (!pszFullname)
3107 {
3108 rc = VERR_NO_STR_MEMORY;
3109 break;
3110 }
3111 }
3112 pExtent->pszFullname = pszFullname;
3113 }
3114 else
3115 pExtent->pszFullname = NULL;
3116 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3117 switch (pExtent->enmType)
3118 {
3119 case VMDKETYPE_HOSTED_SPARSE:
3120 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3121 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3122 if (RT_FAILURE(rc))
3123 {
3124 /* Do NOT signal an appropriate error here, as the VD
3125 * layer has the choice of retrying the open if it
3126 * failed. */
3127 break;
3128 }
3129 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3130 false /* fMagicAlreadyRead */);
3131 if (RT_FAILURE(rc))
3132 break;
3133 rc = vmdkReadMetaExtent(pImage, pExtent);
3134 if (RT_FAILURE(rc))
3135 break;
3136 /* Mark extent as unclean if opened in read-write mode. */
3137 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3138 {
3139 pExtent->fUncleanShutdown = true;
3140 pExtent->fMetaDirty = true;
3141 }
3142 break;
3143 case VMDKETYPE_VMFS:
3144 case VMDKETYPE_FLAT:
3145 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3146 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3147 if (RT_FAILURE(rc))
3148 {
3149 /* Do NOT signal an appropriate error here, as the VD
3150 * layer has the choice of retrying the open if it
3151 * failed. */
3152 break;
3153 }
3154 break;
3155 case VMDKETYPE_ZERO:
3156 /* Nothing to do. */
3157 break;
3158 default:
3159 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3160 }
3161 }
3162 }
3163 }
3164 else
3165 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3166 }
3167 else
3168 rc = VERR_NO_MEMORY;
3169 }
3170 else if (RT_SUCCESS(rc))
3171 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3172 return rc;
3173}
3174/**
3175 * Read and process the descriptor based on the image type.
3176 *
3177 * @returns VBox status code.
3178 * @param pImage VMDK image instance.
3179 * @param pFile VMDK file handle.
3180 */
3181static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3182{
3183 uint32_t u32Magic;
3184 /* Read magic (if present). */
3185 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3186 &u32Magic, sizeof(u32Magic));
3187 if (RT_SUCCESS(rc))
3188 {
3189 /* Handle the file according to its magic number. */
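        /* Note: per the VMDK spec VMDK_SPARSE_MAGICNUMBER is 0x564d444b, i.e. the
           bytes 'K','D','M','V' when stored little-endian on disk; anything else
           is treated as a plain text descriptor file. */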
3190 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3191 rc = vmdkDescriptorReadSparse(pImage, pFile);
3192 else
3193 rc = vmdkDescriptorReadAscii(pImage, pFile);
3194 }
3195 else
3196 {
3197 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3198 rc = VERR_VD_VMDK_INVALID_HEADER;
3199 }
3200 return rc;
3201}
3202/**
3203 * Internal: Open an image, constructing all necessary data structures.
3204 */
3205static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3206{
3207 pImage->uOpenFlags = uOpenFlags;
3208 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3209 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3210 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3211 /*
3212 * Open the image.
3213 * We don't have to check for asynchronous access because
3214 * we only support raw access and the opened file is a description
3215 * file where no data is stored.
3216 */
3217 PVMDKFILE pFile;
3218 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3219 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3220 if (RT_SUCCESS(rc))
3221 {
3222 pImage->pFile = pFile;
3223 rc = vmdkDescriptorRead(pImage, pFile);
3224 if (RT_SUCCESS(rc))
3225 {
3226 /* Determine PCHS geometry if not set. */
3227 if (pImage->PCHSGeometry.cCylinders == 0)
3228 {
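            /* Worked example (assuming the default 16 heads and 63 sectors per track
               used when the descriptor stores no geometry): a 20 GiB image has
               41943040 sectors, giving ~41610 cylinders, which is then clamped to
               the ATA limit of 16383 below. */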
3229 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3230 / pImage->PCHSGeometry.cHeads
3231 / pImage->PCHSGeometry.cSectors;
3232 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3233 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3234 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3235 {
3236 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3237 AssertRC(rc);
3238 }
3239 }
3240 /* Update the image metadata now in case it has changed. */
3241 rc = vmdkFlushImage(pImage, NULL);
3242 if (RT_SUCCESS(rc))
3243 {
3244 /* Figure out a few per-image constants from the extents. */
3245 pImage->cbSize = 0;
3246 for (unsigned i = 0; i < pImage->cExtents; i++)
3247 {
3248 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3249 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3250 {
3251 /* Here used to be a check whether the nominal size of an extent
3252 * is a multiple of the grain size. The spec says that this is
3253 * always the case, but unfortunately some files out there in the
3254 * wild violate the spec (e.g. ReactOS 0.3.1). */
3255 }
3256 else if ( pExtent->enmType == VMDKETYPE_FLAT
3257 || pExtent->enmType == VMDKETYPE_ZERO)
3258 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3259 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3260 }
3261 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3262 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3263 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3264 rc = vmdkAllocateGrainTableCache(pImage);
3265 }
3266 }
3267 }
3268 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3269 * choice of retrying the open if it failed. */
3270 if (RT_SUCCESS(rc))
3271 {
3272 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3273 pImage->RegionList.fFlags = 0;
3274 pImage->RegionList.cRegions = 1;
3275 pRegion->offRegion = 0; /* Disk start. */
3276 pRegion->cbBlock = 512;
3277 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3278 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3279 pRegion->cbData = 512;
3280 pRegion->cbMetadata = 0;
3281 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3282 }
3283 else
3284 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3285 return rc;
3286}
3287/**
3288 * Frees a raw descriptor.
3289 * @internal
3290 */
3291static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3292{
3293 if (!pRawDesc)
3294 return VINF_SUCCESS;
3295 RTStrFree(pRawDesc->pszRawDisk);
3296 pRawDesc->pszRawDisk = NULL;
3297 /* Partitions: */
3298 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3299 {
3300 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3301 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3302 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3303 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3304 }
3305 RTMemFree(pRawDesc->pPartDescs);
3306 pRawDesc->pPartDescs = NULL;
3307 RTMemFree(pRawDesc);
3308 return VINF_SUCCESS;
3309}
3310/**
3311 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3312 * returning the pointer to the first new entry.
3313 * @internal
3314 */
3315static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3316{
3317 uint32_t const cOld = pRawDesc->cPartDescs;
3318 uint32_t const cNew = cOld + cToAdd;
3319 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3320 cOld * sizeof(pRawDesc->pPartDescs[0]),
3321 cNew * sizeof(pRawDesc->pPartDescs[0]));
3322 if (paNew)
3323 {
3324 pRawDesc->cPartDescs = cNew;
3325 pRawDesc->pPartDescs = paNew;
3326 *ppRet = &paNew[cOld];
3327 return VINF_SUCCESS;
3328 }
3329 *ppRet = NULL;
3330 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3331 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3332 pImage->pszFilename, cOld, cNew);
3333}
3334/**
3335 * @callback_method_impl{FNRTSORTCMP}
3336 */
3337static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3338{
3339 RT_NOREF(pvUser);
3340 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3341 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3342}
3343/**
3344 * Post processes the partition descriptors.
3345 *
3346 * Sorts them and checks that they don't overlap.
3347 */
3348static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3349{
3350 /*
3351 * Sort data areas in ascending order of start.
3352 */
3353 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3354 /*
3355 * Check that we don't have overlapping descriptors. If we do, that's an
3356 * indication that the drive is corrupt or that the RTDvm code is buggy.
3357 */
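    /* Example of the checks below: a descriptor starting at 0x8000 with 0x4000
     * bytes of data covers 0x8000..0xBFFF inclusive, so the next descriptor must
     * start at 0xC000 or later, and the last byte must lie below cbSize. */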
3358 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3359 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3360 {
3361 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3362 if (offLast <= paPartDescs[i].offStartInVDisk)
3363 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3364 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3365 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3366 paPartDescs[i].pvPartitionData ? " (data)" : "");
3367 offLast -= 1;
3368 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3369 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3370 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3371 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3372 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3373 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3374 if (offLast >= cbSize)
3375 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3376 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3377 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3378 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3379 }
3380 return VINF_SUCCESS;
3381}
3382#ifdef RT_OS_LINUX
3383/**
3384 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3385 * 'dev' file matching @a uDevToLocate.
3386 *
3387 * This is used both for locating the drive's directory under /sys/block/
3388 * and for locating the partition subdirectory beneath it.
3389 * @returns IPRT status code, errors have been reported properly.
3390 * @param pImage For error reporting.
3391 * @param pszBlockDevDir Input: Path to the directory to search under.
3392 * Output: Path to the directory containing information
3393 * for @a uDevToLocate.
3394 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3395 * @param uDevToLocate The device number of the block device info dir to
3396 * locate.
3397 * @param pszDevToLocate For error reporting.
3398 */
3399static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3400 dev_t uDevToLocate, const char *pszDevToLocate)
3401{
3402 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3403 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3404 RTDIR hDir = NIL_RTDIR;
3405 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3406 if (RT_SUCCESS(rc))
3407 {
3408 for (;;)
3409 {
3410 RTDIRENTRY Entry;
3411 rc = RTDirRead(hDir, &Entry, NULL);
3412 if (RT_SUCCESS(rc))
3413 {
3414 /* We're interested in directories and symlinks. */
3415 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3416 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3417 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3418 {
3419 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3420 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
3421 dev_t uThisDevNo = ~uDevToLocate;
3422 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3423 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3424 break;
3425 }
3426 }
3427 else
3428 {
3429 pszBlockDevDir[cchDir] = '\0';
3430 if (rc == VERR_NO_MORE_FILES)
3431 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3432 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3433 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3434 else
3435 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3436 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3437 pImage->pszFilename, pszBlockDevDir, rc);
3438 break;
3439 }
3440 }
3441 RTDirClose(hDir);
3442 }
3443 else
3444 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3445 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3446 pImage->pszFilename, pszBlockDevDir, rc);
3447 return rc;
3448}
3449#endif /* RT_OS_LINUX */
3450#ifdef RT_OS_FREEBSD
3451/**
3452 * Reads the config data from the provider and returns offset and size
3453 *
3454 * @return IPRT status code
3455 * @param pProvider GEOM provider representing partition
3456 * @param pcbOffset Placeholder for the offset of the partition
3457 * @param pcbSize Placeholder for the size of the partition
3458 */
3459static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3460{
3461 gconfig *pConfEntry;
3462 int rc = VERR_NOT_FOUND;
3463 /*
3464 * Required parameters are located in the list containing key/value pairs.
3465 * Both key and value are in text form. The manual says nothing about whether
3466 * both parameters are guaranteed to be present in the list, and there are
3467 * cases when only one of them is. To handle such cases we treat absent
3468 * params as zero, letting the caller decide whether that is correct or an
3469 * error.
3470 */
3471 uint64_t cbOffset = 0;
3472 uint64_t cbSize = 0;
3473 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3474 {
3475 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3476 {
3477 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3478 rc = VINF_SUCCESS;
3479 }
3480 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3481 {
3482 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3483 rc = VINF_SUCCESS;
3484 }
3485 }
3486 if (RT_SUCCESS(rc))
3487 {
3488 *pcbOffset = cbOffset;
3489 *pcbSize = cbSize;
3490 }
3491 return rc;
3492}
3493/**
3494 * Searches the partition specified by name and calculates its size and absolute offset.
3495 *
3496 * @return IPRT status code.
3497 * @param pParentClass Class containing pParentGeom
3498 * @param pszParentGeomName Name of the parent geom where we are looking for the provider
3499 * @param pszProviderName Name of the provider we are looking for
3500 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3501 * @param pcbSize Placeholder for the size of the partition.
3502 */
3503static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3504 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3505{
3506 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3507 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3508 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3509 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3510 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3511 ggeom *pParentGeom;
3512 int rc = VERR_NOT_FOUND;
3513 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3514 {
3515 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3516 {
3517 rc = VINF_SUCCESS;
3518 break;
3519 }
3520 }
3521 if (RT_FAILURE(rc))
3522 return rc;
3523 gprovider *pProvider;
3524 /*
3525 * First, go over the providers without handling EBR or BSDLabel
3526 * partitions, for the case where the provider we are looking for is a
3527 * direct child of the given geom; this keeps the search short.
3528 */
3529 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3530 {
3531 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3532 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3533 }
3534 /*
3535 * No provider found. Go over the parent geom again and recurse if a
3536 * provider represents an EBR or BSDLabel container. In that case the
3537 * given parent geom contains only the EBR or BSDLabel partition
3538 * itself, and its sub-partitions live in separate geoms. Also,
3539 * partition offsets are relative to their geom, so we have to add
3540 * the child provider's offset to the offset of the parent geom's
3541 * provider.
3542 */
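    /* Example (assuming typical FreeBSD naming): when looking for provider
     * 'ada0s1a' under drive geom 'ada0', it is found via the slice provider
     * 'ada0s1'; the absolute offset is the offset of 'ada0s1' on the disk plus
     * the offset of 'ada0s1a' inside that slice, which the recursion above adds up. */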
3543 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3544 {
3545 uint64_t cbOffset = 0;
3546 uint64_t cbSize = 0;
3547 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3548 if (RT_FAILURE(rc))
3549 return rc;
3550 uint64_t cbProviderOffset = 0;
3551 uint64_t cbProviderSize = 0;
3552 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3553 if (RT_SUCCESS(rc))
3554 {
3555 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3556 *pcbSize = cbProviderSize;
3557 return rc;
3558 }
3559 }
3560 return VERR_NOT_FOUND;
3561}
3562#endif
3563/**
3564 * Attempts to verify the raw partition path.
3565 *
3566 * We don't want to trust RTDvm and the partition device node morphing blindly.
3567 */
3568static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3569 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3570{
3571 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
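    /* Overview of the platform specific checks below: Windows compares storage
       device numbers and the drive layout, Linux walks /sys/block, FreeBSD walks
       the GEOM tree, Solaris uses DKIOCINFO plus EFI/extended partition info, and
       Darwin queries the physical extent of the partition. All variants finish
       with a content comparison of the first sectors of the partition. */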
3572 /*
3573 * Try open the raw partition device.
3574 */
3575 RTFILE hRawPart = NIL_RTFILE;
3576 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3577 if (RT_FAILURE(rc))
3578 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3579 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3580 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3581 /*
3582 * Compare the partition UUID if we can get it.
3583 */
3584#ifdef RT_OS_WINDOWS
3585 DWORD cbReturned;
3586 /* 1. Get the device numbers for both handles; they should refer to the same disk. */
3587 STORAGE_DEVICE_NUMBER DevNum1;
3588 RT_ZERO(DevNum1);
3589 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3590 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3591 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3592 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3593 pImage->pszFilename, pszRawDrive, GetLastError());
3594 STORAGE_DEVICE_NUMBER DevNum2;
3595 RT_ZERO(DevNum2);
3596 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3597 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3598 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3599 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3600 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3601 if ( RT_SUCCESS(rc)
3602 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3603 || DevNum1.DeviceType != DevNum2.DeviceType))
3604 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3605 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3606 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3607 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3608 if (RT_SUCCESS(rc))
3609 {
3610 /* Get the partitions from the raw drive and match up with the volume info
3611 from RTDvm. The partition number is found in DevNum2. */
3612 DWORD cbNeeded = 0;
3613 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3614 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3615 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3616 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3617 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3618 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3619 if (pLayout)
3620 {
3621 cbReturned = 0;
3622 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3623 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3624 {
3625 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3626 unsigned iEntry = 0;
3627 while ( iEntry < pLayout->PartitionCount
3628 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3629 iEntry++;
3630 if (iEntry < pLayout->PartitionCount)
3631 {
3632 /* Compare the basics */
3633 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3634 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3635 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3636 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3637 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3638 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3639 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3640 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3641 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3642 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3643 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3644 /** @todo We could compare the MBR type, GPT type and ID. */
3645 RT_NOREF(hVol);
3646 }
3647 else
3648 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3649 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3650 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3651 DevNum2.PartitionNumber, pLayout->PartitionCount);
3652# ifndef LOG_ENABLED
3653 if (RT_FAILURE(rc))
3654# endif
3655 {
3656 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3657 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3658 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3659 {
3660 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
3661 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
3662 pEntry->PartitionStyle, pEntry->RewritePartition));
3663 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
3664 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
3665 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
3666 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
3667 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
3668 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
3669 else
3670 LogRel(("\n"));
3671 }
3672 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
3673 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
3674 }
3675 }
3676 else
3677 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3678 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
3679 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
3680 RTMemTmpFree(pLayout);
3681 }
3682 else
3683 rc = VERR_NO_TMP_MEMORY;
3684 }
3685#elif defined(RT_OS_LINUX)
3686 RT_NOREF(hVol);
3687 /* Stat the two devices first to get their device numbers. (We probably
3688 could make some assumptions here about the major & minor number assignments
3689 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
3690 struct stat StDrive, StPart;
3691 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3692 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3693 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3694 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
3695 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3696 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
3697 else
3698 {
3699 /* Scan the directories immediately under /sys/block/ for one with a
3700 'dev' file matching the drive's device number: */
3701 char szSysPath[RTPATH_MAX];
3702 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
3703 AssertRCReturn(rc, rc); /* this shall not fail */
3704 if (RTDirExists(szSysPath))
3705 {
3706 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
3707 /* Now, scan the directories under that again for a partition device
3708 matching the hRawPart device's number: */
3709 if (RT_SUCCESS(rc))
3710 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
3711 /* Having found the /sys/block/device/partition/ path, we can finally
3712 read the partition attributes and compare with hVol. */
3713 if (RT_SUCCESS(rc))
3714 {
3715 /* partition number: */
3716 int64_t iLnxPartition = 0;
3717 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
3718 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
3719 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3720 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
3721 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
3722 /* else: ignore failure? */
3723 /* start offset: */
3724 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
3725 if (RT_SUCCESS(rc))
3726 {
3727 int64_t offLnxStart = -1;
3728 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
3729 offLnxStart *= cbLnxSector;
3730 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
3731 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3732 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3733 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
3734 /* else: ignore failure? */
3735 }
3736 /* the size: */
3737 if (RT_SUCCESS(rc))
3738 {
3739 int64_t cbLnxData = -1;
3740 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
3741 cbLnxData *= cbLnxSector;
3742 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
3743 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3744 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3745 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
3746 /* else: ignore failure? */
3747 }
3748 }
3749 }
3750 /* else: We've got nothing to work on, so only do content comparison. */
3751 }
3752#elif defined(RT_OS_FREEBSD)
3753 char szDriveDevName[256];
3754 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
3755 if (pszDevName == NULL)
3756 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3757 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
3758 char szPartDevName[256];
3759 if (RT_SUCCESS(rc))
3760 {
3761 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
3762 if (pszDevName == NULL)
3763 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3764 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
3765 }
3766 if (RT_SUCCESS(rc))
3767 {
3768 gmesh geomMesh;
3769 int err = geom_gettree(&geomMesh);
3770 if (err == 0)
3771 {
3772 /* Find the root class containing partition info */
3773 gclass* pPartClass;
3774 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
3775 {
3776 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
3777 break;
3778 }
3779 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
3780 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
3781 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
3782 if (RT_SUCCESS(rc))
3783 {
3784 /* Find provider representing partition device */
3785 uint64_t cbOffset;
3786 uint64_t cbSize;
3787 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
3788 if (RT_SUCCESS(rc))
3789 {
3790 if (cbOffset != pPartDesc->offStartInVDisk)
3791 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3792 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3793 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3794 if (cbSize != pPartDesc->cbData)
3795 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3796 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3797 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3798 }
3799 else
3800 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3801 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
3802 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
3803 }
3804 geom_deletetree(&geomMesh);
3805 }
3806 else
3807 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
3808 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
3809 }
3810#elif defined(RT_OS_SOLARIS)
3811 RT_NOREF(hVol);
3812 dk_cinfo dkiDriveInfo;
3813 dk_cinfo dkiPartInfo;
3814 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
3815 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3816 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3817 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
3818 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3819 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
3820 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
3821 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
3822 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
3823 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
3824 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
3825 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3826 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
3827 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3828 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
3829 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
3830 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
3831 else
3832 {
3833 uint64_t cbOffset = 0;
3834 uint64_t cbSize = 0;
3835 dk_gpt *pEfi = NULL;
3836 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
3837 if (idxEfiPart >= 0)
3838 {
3839 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
3840 {
3841 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
3842 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
3843 }
3844 else
3845 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3846 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3847 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3848 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
3849 efi_free(pEfi);
3850 }
3851 else
3852 {
3853 /*
3854 * The manual says efi_alloc_and_read returns VT_EINVAL if no EFI partition table is found.
3855 * In practice the function can return other errors, e.g. VT_ERROR, so we cannot tell whether
3856 * it is a real error or simply no EFI table. Therefore, try to obtain the partition info
3857 * another way; if that fails it sets errno, which is handled below.
3858 */
3859 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
3860 if (numPartition > NDKMAP)
3861 numPartition -= NDKMAP;
3862 if (numPartition != idxPartition)
3863 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3864 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
3865 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3866 idxPartition, numPartition);
3867 else
3868 {
3869 dk_minfo_ext mediaInfo;
3870 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
3871 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3872 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3873 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3874 else
3875 {
3876 extpart_info extPartInfo;
3877 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
3878 {
3879 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
3880 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
3881 }
3882 else
3883 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3884 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
3885 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3886 }
3887 }
3888 }
3889 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
3890 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3891 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
3892 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3893 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
3894 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3895 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
3896 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3897 }
3898
3899#elif defined(RT_OS_DARWIN)
3900 /* Stat the drive to get its device number. */
3901 struct stat StDrive;
3902 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
3903 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3904 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
3905 else
3906 {
3907 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
3908 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3909 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
3910 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3911 else
3912 {
3913 uint32_t cbBlockSize = 0;
3914 uint64_t cbOffset = 0;
3915 uint64_t cbSize = 0;
3916 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
3917 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3918 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
3919 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3920 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
3921 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3922 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
3923 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3924 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
3925 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3926 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
3927 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3928 else
3929 {
3930 cbSize *= (uint64_t)cbBlockSize;
3931 dk_physical_extent_t dkPartExtent = {0};
3932 dkPartExtent.offset = 0;
3933 dkPartExtent.length = cbSize;
3934 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
3935 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3936 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
3937 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3938 else
3939 {
3940 if (dkPartExtent.dev != StDrive.st_rdev)
3941 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3942 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
3943 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
3944 else if (cbOffset != pPartDesc->offStartInVDisk)
3945 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3946 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
3947 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
3948 else if (cbSize != pPartDesc->cbData)
3949 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3950 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
3951 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
3952 }
3953 }
3954
3955 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
3956 {
3957 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
3958 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
3959 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
3960 if (RT_SUCCESS(rc))
3961 rc = rc2;
3962 }
3963 }
3964 }
3965
3966#else
3967 RT_NOREF(hVol); /* PORTME */
3968#endif
3969 if (RT_SUCCESS(rc))
3970 {
3971 /*
3972 * Compare the first 32 sectors of the partition.
3973 *
3974 * This might not be conclusive, but for partitions formatted with the more
3975 * common file systems it should be, as they have a superblock copy at or near
3976 * the start of the partition (at least FAT, FAT32, NTFS and ext4 do).
3977 */
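        /* The loop below re-reads both ranges up to 256 times. A mismatch is only
           reported once the CRC-64 of both buffers has stayed unchanged for several
           consecutive reads; otherwise the data is deemed volatile and the verdict
           is left undecided (a positive status is returned). */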
3978 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
3979 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
3980 if (pbSector1 != NULL)
3981 {
3982 uint8_t *pbSector2 = pbSector1 + cbToCompare;
3983 /* Do the comparison; we repeat on mismatch in case the data is volatile. */
3984 uint64_t uPrevCrc1 = 0;
3985 uint64_t uPrevCrc2 = 0;
3986 uint32_t cStable = 0;
3987 for (unsigned iTry = 0; iTry < 256; iTry++)
3988 {
3989 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
3990 if (RT_SUCCESS(rc))
3991 {
3992 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
3993 if (RT_SUCCESS(rc))
3994 {
3995 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
3996 {
3997 rc = VERR_MISMATCH;
3998 /* Do data stability checks before repeating: */
3999 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4000 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4001 if ( uPrevCrc1 != uCrc1
4002 || uPrevCrc2 != uCrc2)
4003 cStable = 0;
4004 else if (++cStable > 4)
4005 break;
4006 uPrevCrc1 = uCrc1;
4007 uPrevCrc2 = uCrc2;
4008 continue;
4009 }
4010 rc = VINF_SUCCESS;
4011 }
4012 else
4013 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4014 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4015 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4016 }
4017 else
4018 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4019 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4020 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4021 break;
4022 }
4023 if (rc == VERR_MISMATCH)
4024 {
4025 /* Find the first mismatching bytes: */
4026 size_t offMissmatch = 0;
4027 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4028 offMissmatch++;
4029 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4030 if (cStable > 0)
4031 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4032 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4033 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4034 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4035 else
4036 {
4037 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4038 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4039 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4040 rc = -rc;
4041 }
4042 }
4043 RTMemTmpFree(pbSector1);
4044 }
4045 else
4046 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4047 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4048 pImage->pszFilename, cbToCompare * 2);
4049 }
4050 RTFileClose(hRawPart);
4051 return rc;
4052}
4053#ifdef RT_OS_WINDOWS
4054/**
4055 * Construct the device name for the given partition number.
4056 */
4057static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4058 char **ppszRawPartition)
4059{
4060 int rc = VINF_SUCCESS;
4061 DWORD cbReturned = 0;
4062 STORAGE_DEVICE_NUMBER DevNum;
4063 RT_ZERO(DevNum);
4064 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4065 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4066 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4067 else
4068 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4069 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4070 pImage->pszFilename, pszRawDrive, GetLastError());
4071 return rc;
4072}
4073#endif /* RT_OS_WINDOWS */
4074/**
4075 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4076 * 'Partitions' configuration value is present.
4077 *
4078 * @returns VBox status code, error message has been set on failure.
4079 *
4080 * @note Caller is assumed to clean up @a pRawDesc and release
4081 * @a *phVolToRelease.
4082 * @internal
4083 */
4084static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4085 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4086 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4087 PRTDVMVOLUME phVolToRelease)
4088{
4089 *phVolToRelease = NIL_RTDVMVOLUME;
4090 /* Check sanity/understanding. */
4091 Assert(fPartitions);
4092 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4093 /*
4094 * Allocate one descriptor for each volume up front.
4095 */
4096 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4097 PVDISKRAWPARTDESC paPartDescs = NULL;
4098 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4099 AssertRCReturn(rc, rc);
4100 /*
4101 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4102 */
4103 uint32_t fPartitionsLeft = fPartitions;
4104 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4105 for (uint32_t i = 0; i < cVolumes; i++)
4106 {
4107 /*
4108 * Get the next/first volume and release the current.
4109 */
4110 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4111 if (i == 0)
4112 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4113 else
4114 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4115 if (RT_FAILURE(rc))
4116 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4117 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4118 pImage->pszFilename, i, pszRawDrive, rc);
4119 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4120 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4121 *phVolToRelease = hVol = hVolNext;
4122 /*
4123 * Depending on the fPartitions selector and associated read-only mask,
4124 * the guest either gets read-write or read-only access (bits set)
4125 * or no access (selector bit clear, access directed to the VMDK).
4126 */
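        /* Example: fPartitions=0x06 with fPartitionsReadOnly=0x04 gives the guest
           read-write access to partition #1 and read-only access to partition #2;
           all other partitions are backed by the VMDK rather than the raw drive. */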
4127 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4128 uint64_t offVolumeEndIgnored = 0;
4129 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4130 if (RT_FAILURE(rc))
4131 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4132 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4133 pImage->pszFilename, i, pszRawDrive, rc);
4134 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4135 /* Note! The index must match IHostDrivePartition::number. */
4136 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4137 if ( idxPartition < 32
4138 && (fPartitions & RT_BIT_32(idxPartition)))
4139 {
4140 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4141 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4142 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4143 if (!fRelative)
4144 {
4145 /*
4146 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4147 */
4148 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4149 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4150 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4151 }
4152 else
4153 {
4154 /*
4155 * Relative means accessing the partition data via the device node for that
4156 * partition, allowing the sysadmin/OS to grant a user access to individual
4157 * partitions without necessarily letting them compromise the host OS.
4158 * Obviously, the creation of the VMDK requires read access to the main
4159 * device node for the drive, but that's a one-time thing and can be done
4160 * by the sysadmin. Here data starts at offset zero in the device node.
4161 */
4162 paPartDescs[i].offStartInDevice = 0;
4163#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4164 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4165 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4166#elif defined(RT_OS_LINUX)
4167 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4168 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4169 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4170#elif defined(RT_OS_WINDOWS)
4171 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4172 AssertRCReturn(rc, rc);
4173#elif defined(RT_OS_SOLARIS)
4174 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4175 {
4176 /*
4177 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4178 * where X is the controller,
4179 * Y is target (SCSI device number),
4180 * Z is disk number,
4181 * K is partition number,
4182 * where p0 is the whole disk
4183 * and p1-pN are the partitions of the disk
4184 */
4185 const char *pszRawDrivePath = pszRawDrive;
4186 char szDrivePath[RTPATH_MAX];
4187 size_t cbRawDrive = strlen(pszRawDrive);
4188 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4189 {
4190 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4191 szDrivePath[cbRawDrive - 2] = '\0';
4192 pszRawDrivePath = szDrivePath;
4193 }
4194 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4195 }
4196 else /* GPT */
4197 {
4198 /*
4199 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4200 * where X is the controller,
4201 * Y is target (SCSI device number),
4202 * Z is disk number,
4203 * K is partition number, zero based. Can be only from 0 to 6.
4204 * Thus, only partitions numbered 0 through 6 have device nodes.
4205 */
4206 if (idxPartition > 7)
4207 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4208 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4209 pImage->pszFilename, idxPartition, pszRawDrive);
4210 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4211 }
4212#else
4213 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4214#endif
4215 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4216 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4217 AssertRCReturn(rc, rc);
4218 }
4219 }
4220 else
4221 {
4222 /* Not accessible to the guest. */
4223 paPartDescs[i].offStartInDevice = 0;
4224 paPartDescs[i].pszRawDevice = NULL;
4225 }
4226 } /* for each volume */
4227 RTDvmVolumeRelease(hVol);
4228 *phVolToRelease = NIL_RTDVMVOLUME;
4229 /*
4230 * Check that we found all the partitions the user selected.
4231 */
4232 if (fPartitionsLeft)
4233 {
4234 char szLeft[3 * sizeof(fPartitions) * 8];
4235 size_t cchLeft = 0;
4236 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4237 if (fPartitionsLeft & RT_BIT_32(i))
4238 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? ",%u" : "%u", i);
4239 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4240 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4241 pImage->pszFilename, pszRawDrive, szLeft);
4242 }
4243 return VINF_SUCCESS;
4244}
4245/**
4246 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4247 * of the partition tables and associated padding areas when the 'Partitions'
4248 * configuration value is present.
4249 *
4250 * The guest is not allowed access to the partition tables; however, it needs
4251 * them to be able to access the drive. So, create descriptors for each of the
4252 * tables and attach the current disk content. vmdkCreateRawImage() will later
4253 * write the content to the VMDK. Any changes the guest later makes to the
4254 * partition tables will then go to the VMDK copy, rather than the host drive.
4255 *
4256 * @returns VBox status code, error message has been set on failure.
4257 *
4258 * @note Caller is assumed to clean up @a pRawDesc
4259 * @internal
4260 */
4261static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4262 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4263{
4264 /*
4265 * Query the locations.
4266 */
4267 /* Determine how many locations there are: */
4268 size_t cLocations = 0;
4269 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4270 if (rc != VERR_BUFFER_OVERFLOW)
4271 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4272 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4273 pImage->pszFilename, pszRawDrive, rc);
4274 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4275 /* We can allocate the partition descriptors here to save an indentation level. */
4276 PVDISKRAWPARTDESC paPartDescs = NULL;
4277 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4278 AssertRCReturn(rc, rc);
4279 /* Allocate the result table and repeat the location table query: */
4280 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4281 if (!paLocations)
4282 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4283 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4284 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4285 if (RT_SUCCESS(rc))
4286 {
4287 /*
4288 * Translate them into descriptors.
4289 *
4290 * We restrict the amount of partition alignment padding to 4MiB as more
4291 * would just be a waste of space. The use case for including the padding
4292 * is older boot loaders and boot managers (including one by a team member)
4293 * that put data and code in the 62 sectors between the MBR and the first
4294 * partition (total of 63). Later CHS was abandoned and partitions started
4295 * being aligned on power-of-two sector boundaries (typically 64KiB or
4296 * 1MiB depending on the media size).
4297 */
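        /* Illustration (assuming RTDvm reports the gap up to the first partition as
           padding): on a classic CHS-aligned MBR disk the first partition starts at
           sector 63, so the copied area is the 512 byte MBR plus the 62 sector gap,
           32256 bytes in total; with 1 MiB alignment it is the first 2048 sectors. */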
4298 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4299 {
4300 Assert(paLocations[i].cb > 0);
4301 if (paLocations[i].cb <= _64M)
4302 {
4303 /* Create the partition descriptor entry: */
4304 //paPartDescs[i].pszRawDevice = NULL;
4305 //paPartDescs[i].offStartInDevice = 0;
4306 //paPartDescs[i].uFlags = 0;
4307 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4308 paPartDescs[i].cbData = paLocations[i].cb;
4309 if (paPartDescs[i].cbData < _4M)
4310 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4311 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4312 if (paPartDescs[i].pvPartitionData)
4313 {
4314 /* Read the content from the drive: */
4315 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4316 (size_t)paPartDescs[i].cbData, NULL);
4317 if (RT_SUCCESS(rc))
4318 {
4319 /* Do we have custom boot sector code? */
4320 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4321 {
4322 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4323 Instead we fail as we weren't able to do what the user requested us to do.
4324 Better that the user knows up front than is left wondering why the guest
4325 isn't booting as expected. */
4326 if (cbBootSector <= paPartDescs[i].cbData)
4327 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4328 else
4329 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4330 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4331 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4332 }
4333 }
4334 else
4335 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4336 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4337 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4338 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4339 }
4340 else
4341                         rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4342 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4343 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4344 }
4345 else
4346 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4347                                N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is too big: %RU64 bytes"),
4348 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4349 }
4350 }
4351 else
4352 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4353 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4354 pImage->pszFilename, pszRawDrive, rc);
4355 RTMemFree(paLocations);
4356 return rc;
4357}
4358/**
4359 * Opens the volume manager for the raw drive when in selected-partition mode.
4360 *
4361 * @param pImage The VMDK image (for errors).
4362 * @param hRawDrive The raw drive handle.
4363 * @param pszRawDrive The raw drive device path (for errors).
4364 * @param cbSector The sector size.
4365 * @param phVolMgr Where to return the handle to the volume manager on
4366 * success.
4367 * @returns VBox status code, errors have been reported.
4368 * @internal
4369 */
4370static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4371{
4372 *phVolMgr = NIL_RTDVM;
4373 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4374 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4375 if (RT_FAILURE(rc))
4376 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4377 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4378 pImage->pszFilename, pszRawDrive, rc);
4379 RTDVM hVolMgr = NIL_RTDVM;
4380 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4381 RTVfsFileRelease(hVfsFile);
4382 if (RT_FAILURE(rc))
4383 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4384 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4385 pImage->pszFilename, pszRawDrive, rc);
4386 rc = RTDvmMapOpen(hVolMgr);
4387 if (RT_SUCCESS(rc))
4388 {
4389 *phVolMgr = hVolMgr;
4390 return VINF_SUCCESS;
4391 }
4392 RTDvmRelease(hVolMgr);
4393 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4394 pImage->pszFilename, pszRawDrive, rc);
4395}
4396/**
4397 * Opens the raw drive device and gets the sizes for it.
4398 *
4399 * @param pImage The image (for error reporting).
4400 * @param pszRawDrive The device/whatever to open.
4401 * @param phRawDrive Where to return the file handle.
4402 * @param pcbRawDrive Where to return the size.
4403 * @param pcbSector Where to return the sector size.
4404 * @returns IPRT status code, errors have been reported.
4405 * @internal
4406 */
4407static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4408 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4409{
4410 /*
4411 * Open the device for the raw drive.
4412 */
4413 RTFILE hRawDrive = NIL_RTFILE;
4414 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4415 if (RT_FAILURE(rc))
4416 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4417 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4418 pImage->pszFilename, pszRawDrive, rc);
4419 /*
4420 * Get the sector size.
4421 */
4422 uint32_t cbSector = 0;
4423 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4424 if (RT_SUCCESS(rc))
4425 {
4426 /* sanity checks */
4427 if ( cbSector >= 512
4428 && cbSector <= _64K
4429 && RT_IS_POWER_OF_TWO(cbSector))
4430 {
4431 /*
4432 * Get the size.
4433 */
4434 uint64_t cbRawDrive = 0;
4435 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4436 if (RT_SUCCESS(rc))
4437 {
4438                 /* Check whether cbRawDrive is actually sensible. */
4439 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4440 {
4441 *phRawDrive = hRawDrive;
4442 *pcbRawDrive = cbRawDrive;
4443 *pcbSector = cbSector;
4444 return VINF_SUCCESS;
4445 }
4446 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4447 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4448 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4449 }
4450 else
4451 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4452 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4453 pImage->pszFilename, pszRawDrive, rc);
4454 }
4455 else
4456 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4457 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4458 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4459 }
4460 else
4461 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4462 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4463 pImage->pszFilename, pszRawDrive, rc);
4464 RTFileClose(hRawDrive);
4465 return rc;
4466}
4467/**
4468 * Reads the raw disk configuration, leaving initialization and cleanup to the
4469 * caller (regardless of return status).
4470 *
4471 * @returns VBox status code, errors properly reported.
4472 * @internal
4473 */
4474static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4475 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4476 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4477 char **ppszFreeMe)
4478{
4479 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4480 if (!pImgCfg)
4481 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4482 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4483 /*
4484 * RawDrive = path
4485 */
4486 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4487 if (RT_FAILURE(rc))
4488 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4489 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4490 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4491 /*
4492 * Partitions=n[r][,...]
4493 */
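    /* For example, "Partitions=2,4r" selects partitions 2 and 4 and marks
     * partition 4 read-only (illustrative value; the parser below accepts
     * indexes 0..31, an optional 'r' suffix per index and ',' separators). */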
4494 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4495 *pfPartitions = *pfPartitionsReadOnly = 0;
4496 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4497 if (RT_SUCCESS(rc))
4498 {
4499 char *psz = *ppszFreeMe;
4500 while (*psz != '\0')
4501 {
4502 char *pszNext;
4503 uint32_t u32;
4504 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4505 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4506 rc = -rc;
4507 if (RT_FAILURE(rc))
4508 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4509 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4510 pImage->pszFilename, rc, psz);
4511 if (u32 >= cMaxPartitionBits)
4512 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4513 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4514 pImage->pszFilename, u32, cMaxPartitionBits);
4515 *pfPartitions |= RT_BIT_32(u32);
4516 psz = pszNext;
4517 if (*psz == 'r')
4518 {
4519 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4520 psz++;
4521 }
4522 if (*psz == ',')
4523 psz++;
4524 else if (*psz != '\0')
4525 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4526 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4527 pImage->pszFilename, psz);
4528 }
4529 RTStrFree(*ppszFreeMe);
4530 *ppszFreeMe = NULL;
4531 }
4532 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4533 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4534 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4535 /*
4536 * BootSector=base64
4537 */
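    /* The value is the raw boot sector content, base64 encoded.  The checks
     * below reject an empty or over-4MiB sector and any use of it without a
     * partition selection. */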
4538 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4539 if (RT_SUCCESS(rc))
4540 {
4541 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4542 if (cbBootSector < 0)
4543 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4544 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4545 pImage->pszFilename, *ppszRawDrive);
4546 if (cbBootSector == 0)
4547 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4548 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4549 pImage->pszFilename, *ppszRawDrive);
4550 if (cbBootSector > _4M) /* this is just a preliminary max */
4551 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4552 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4553 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4554 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4555 however, bird disagrees and thinks the user should be told that what
4556 he/she/it tries to do isn't possible. There should be less head
4557 scratching this way when the guest doesn't do the expected thing. */
4558 if (!*pfPartitions)
4559 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4560 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4561 pImage->pszFilename, *ppszRawDrive);
4562 *pcbBootSector = (size_t)cbBootSector;
4563 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4564 if (!*ppvBootSector)
4565 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4566 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4567 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4568 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4569 if (RT_FAILURE(rc))
4570             return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4571 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4572 pImage->pszFilename, *ppszRawDrive, rc);
4573 RTStrFree(*ppszFreeMe);
4574 *ppszFreeMe = NULL;
4575 }
4576 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4577 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4578 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4579 /*
4580 * Relative=0/1
4581 */
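    /* Only meaningful when specific partitions are selected; the checks below
     * reject it for whole-drive setups and on host OSes outside the PORTME
     * list.  Note the macOS-specific default further down. */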
4582 *pfRelative = false;
4583 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4584 if (RT_SUCCESS(rc))
4585 {
4586 if (!*pfPartitions && *pfRelative != false)
4587 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4588 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4589 pImage->pszFilename);
4590#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
4591 if (*pfRelative == true)
4592 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4593 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4594 pImage->pszFilename);
4595#endif
4596 }
4597 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4598 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4599 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4600 else
4601#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4602 *pfRelative = true;
4603#else
4604 *pfRelative = false;
4605#endif
4606 return VINF_SUCCESS;
4607}
4608/**
4609 * Creates a raw drive (nee disk) descriptor.
4610 *
4611 * This was originally done in VBoxInternalManage.cpp, but was copied (not moved)
4612 * here much later. That's one of the reasons why we produce a descriptor just
4613 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
4614 *
4615 * @returns VBox status code.
4616 * @param pImage The image.
4617 * @param ppRaw Where to return the raw drive descriptor. Caller must
4618 * free it using vmdkRawDescFree regardless of the status
4619 * code.
4620 * @internal
4621 */
4622static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
4623{
4624 /* Make sure it's NULL. */
4625 *ppRaw = NULL;
4626 /*
4627 * Read the configuration.
4628 */
4629 char *pszRawDrive = NULL;
4630 uint32_t fPartitions = 0; /* zero if whole-drive */
4631 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
4632 void *pvBootSector = NULL;
4633 size_t cbBootSector = 0;
4634 bool fRelative = false;
4635 char *pszFreeMe = NULL; /* lazy bird cleanup. */
4636 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
4637 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
4638 RTStrFree(pszFreeMe);
4639 if (RT_SUCCESS(rc))
4640 {
4641 /*
4642 * Open the device, getting the sector size and drive size.
4643 */
4644 uint64_t cbSize = 0;
4645 uint32_t cbSector = 0;
4646 RTFILE hRawDrive = NIL_RTFILE;
4647 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
4648 if (RT_SUCCESS(rc))
4649 {
4650 /*
4651 * Create the raw-drive descriptor
4652 */
4653 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
4654 if (pRawDesc)
4655 {
4656 pRawDesc->szSignature[0] = 'R';
4657 pRawDesc->szSignature[1] = 'A';
4658 pRawDesc->szSignature[2] = 'W';
4659 //pRawDesc->szSignature[3] = '\0';
4660 if (!fPartitions)
4661 {
4662 /*
4663 * It's simple for when doing the whole drive.
4664 */
4665 pRawDesc->uFlags = VDISKRAW_DISK;
4666 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
4667 }
4668 else
4669 {
4670 /*
4671 * In selected partitions mode we've got a lot more work ahead of us.
4672 */
4673 pRawDesc->uFlags = VDISKRAW_NORMAL;
4674 //pRawDesc->pszRawDisk = NULL;
4675 //pRawDesc->cPartDescs = 0;
4676 //pRawDesc->pPartDescs = NULL;
4677 /* We need to parse the partition map to complete the descriptor: */
4678 RTDVM hVolMgr = NIL_RTDVM;
4679 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
4680 if (RT_SUCCESS(rc))
4681 {
4682 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
4683 if ( enmFormatType == RTDVMFORMATTYPE_MBR
4684 || enmFormatType == RTDVMFORMATTYPE_GPT)
4685 {
4686 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
4687 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
4688 /* Add copies of the partition tables: */
4689 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
4690 pvBootSector, cbBootSector);
4691 if (RT_SUCCESS(rc))
4692 {
4693 /* Add descriptors for the partitions/volumes, indicating which
4694 should be accessible and how to access them: */
4695 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
4696 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
4697 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
4698 RTDvmVolumeRelease(hVolRelease);
4699 /* Finally, sort the partition and check consistency (overlaps, etc): */
4700 if (RT_SUCCESS(rc))
4701 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
4702 }
4703 }
4704 else
4705 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
4706 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
4707 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
4708 RTDvmRelease(hVolMgr);
4709 }
4710 }
4711 if (RT_SUCCESS(rc))
4712 {
4713 /*
4714 * We succeeded.
4715 */
4716 *ppRaw = pRawDesc;
4717 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
4718 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
4719 if (pRawDesc->cPartDescs)
4720 {
4721 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
4722 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
4723 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
4724 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
4725 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
4726 }
4727 }
4728 else
4729 vmdkRawDescFree(pRawDesc);
4730 }
4731 else
4732             rc = vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4733                            N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for the raw drive descriptor"),
4734                            pImage->pszFilename, sizeof(*pRawDesc));
4735 RTFileClose(hRawDrive);
4736 }
4737 }
4738 RTStrFree(pszRawDrive);
4739 RTMemFree(pvBootSector);
4740 return rc;
4741}
4742/**
4743 * Internal: create VMDK images for raw disk/partition access.
4744 */
4745static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
4746 uint64_t cbSize)
4747{
4748 int rc = VINF_SUCCESS;
4749 PVMDKEXTENT pExtent;
4750 if (pRaw->uFlags & VDISKRAW_DISK)
4751 {
4752 /* Full raw disk access. This requires setting up a descriptor
4753          * file and opening the (flat) raw disk. */
4754 rc = vmdkCreateExtents(pImage, 1);
4755 if (RT_FAILURE(rc))
4756 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4757 pExtent = &pImage->pExtents[0];
4758 /* Create raw disk descriptor file. */
4759 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4760 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4761 true /* fCreate */));
4762 if (RT_FAILURE(rc))
4763 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4764 /* Set up basename for extent description. Cannot use StrDup. */
4765 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
4766 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4767 if (!pszBasename)
4768 return VERR_NO_MEMORY;
4769 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
4770 pExtent->pszBasename = pszBasename;
4771 /* For raw disks the full name is identical to the base name. */
4772 pExtent->pszFullname = RTStrDup(pszBasename);
4773 if (!pExtent->pszFullname)
4774 return VERR_NO_MEMORY;
4775 pExtent->enmType = VMDKETYPE_FLAT;
4776 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4777 pExtent->uSectorOffset = 0;
4778 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4779 pExtent->fMetaDirty = false;
4780 /* Open flat image, the raw disk. */
4781 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4782 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4783 false /* fCreate */));
4784 if (RT_FAILURE(rc))
4785 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
4786 }
4787 else
4788 {
4789 /* Raw partition access. This requires setting up a descriptor
4790          * file, writing the partition information to a flat extent and
4791          * opening all the (flat) raw disk partitions. */
4792 /* First pass over the partition data areas to determine how many
4793 * extents we need. One data area can require up to 2 extents, as
4794 * it might be necessary to skip over unpartitioned space. */
4795 unsigned cExtents = 0;
4796 uint64_t uStart = 0;
4797 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4798 {
4799 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4800 if (uStart > pPart->offStartInVDisk)
4801 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4802 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
4803 if (uStart < pPart->offStartInVDisk)
4804 cExtents++;
4805 uStart = pPart->offStartInVDisk + pPart->cbData;
4806 cExtents++;
4807 }
4808 /* Another extent for filling up the rest of the image. */
4809 if (uStart != cbSize)
4810 cExtents++;
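        /* Illustrative count: two partition data areas, each preceded by a gap
         * of unpartitioned space, plus trailing free space would need
         * 2 * 2 + 1 = 5 extents. */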
4811 rc = vmdkCreateExtents(pImage, cExtents);
4812 if (RT_FAILURE(rc))
4813 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4814 /* Create raw partition descriptor file. */
4815 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4816 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4817 true /* fCreate */));
4818 if (RT_FAILURE(rc))
4819 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
4820 /* Create base filename for the partition table extent. */
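        /* E.g. an image named "raw.vmdk" gets a "raw-pt.vmdk" extent holding
         * the copied partition table data (illustrative name). */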
4821 /** @todo remove fixed buffer without creating memory leaks. */
4822 char pszPartition[1024];
4823 const char *pszBase = RTPathFilename(pImage->pszFilename);
4824 const char *pszSuff = RTPathSuffix(pszBase);
4825 if (pszSuff == NULL)
4826             return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
4827 char *pszBaseBase = RTStrDup(pszBase);
4828 if (!pszBaseBase)
4829 return VERR_NO_MEMORY;
4830 RTPathStripSuffix(pszBaseBase);
4831 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
4832 pszBaseBase, pszSuff);
4833 RTStrFree(pszBaseBase);
4834 /* Second pass over the partitions, now define all extents. */
4835 uint64_t uPartOffset = 0;
4836 cExtents = 0;
4837 uStart = 0;
4838 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
4839 {
4840 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
4841 pExtent = &pImage->pExtents[cExtents++];
4842 if (uStart < pPart->offStartInVDisk)
4843 {
4844 pExtent->pszBasename = NULL;
4845 pExtent->pszFullname = NULL;
4846 pExtent->enmType = VMDKETYPE_ZERO;
4847 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
4848 pExtent->uSectorOffset = 0;
4849 pExtent->enmAccess = VMDKACCESS_READWRITE;
4850 pExtent->fMetaDirty = false;
4851 /* go to next extent */
4852 pExtent = &pImage->pExtents[cExtents++];
4853 }
4854 uStart = pPart->offStartInVDisk + pPart->cbData;
4855 if (pPart->pvPartitionData)
4856 {
4857 /* Set up basename for extent description. Can't use StrDup. */
4858 size_t cbBasename = strlen(pszPartition) + 1;
4859 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4860 if (!pszBasename)
4861 return VERR_NO_MEMORY;
4862 memcpy(pszBasename, pszPartition, cbBasename);
4863 pExtent->pszBasename = pszBasename;
4864 /* Set up full name for partition extent. */
4865 char *pszDirname = RTStrDup(pImage->pszFilename);
4866 if (!pszDirname)
4867 return VERR_NO_STR_MEMORY;
4868 RTPathStripFilename(pszDirname);
4869 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
4870 RTStrFree(pszDirname);
4871 if (!pszFullname)
4872 return VERR_NO_STR_MEMORY;
4873 pExtent->pszFullname = pszFullname;
4874 pExtent->enmType = VMDKETYPE_FLAT;
4875 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4876 pExtent->uSectorOffset = uPartOffset;
4877 pExtent->enmAccess = VMDKACCESS_READWRITE;
4878 pExtent->fMetaDirty = false;
4879 /* Create partition table flat image. */
4880 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4881 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4882 true /* fCreate */));
4883 if (RT_FAILURE(rc))
4884 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
4885 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4886 VMDK_SECTOR2BYTE(uPartOffset),
4887 pPart->pvPartitionData,
4888 pPart->cbData);
4889 if (RT_FAILURE(rc))
4890 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
4891 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
4892 }
4893 else
4894 {
4895 if (pPart->pszRawDevice)
4896 {
4897 /* Set up basename for extent descr. Can't use StrDup. */
4898 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
4899 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
4900 if (!pszBasename)
4901 return VERR_NO_MEMORY;
4902 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
4903 pExtent->pszBasename = pszBasename;
4904                     /* For raw disks the full name is identical to the base name. */
4905 pExtent->pszFullname = RTStrDup(pszBasename);
4906 if (!pExtent->pszFullname)
4907 return VERR_NO_MEMORY;
4908 pExtent->enmType = VMDKETYPE_FLAT;
4909 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4910 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
4911 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
4912 pExtent->fMetaDirty = false;
4913 /* Open flat image, the raw partition. */
4914 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
4915 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
4916 false /* fCreate */));
4917 if (RT_FAILURE(rc))
4918 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
4919 }
4920 else
4921 {
4922 pExtent->pszBasename = NULL;
4923 pExtent->pszFullname = NULL;
4924 pExtent->enmType = VMDKETYPE_ZERO;
4925 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
4926 pExtent->uSectorOffset = 0;
4927 pExtent->enmAccess = VMDKACCESS_READWRITE;
4928 pExtent->fMetaDirty = false;
4929 }
4930 }
4931 }
4932 /* Another extent for filling up the rest of the image. */
4933 if (uStart != cbSize)
4934 {
4935 pExtent = &pImage->pExtents[cExtents++];
4936 pExtent->pszBasename = NULL;
4937 pExtent->pszFullname = NULL;
4938 pExtent->enmType = VMDKETYPE_ZERO;
4939 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
4940 pExtent->uSectorOffset = 0;
4941 pExtent->enmAccess = VMDKACCESS_READWRITE;
4942 pExtent->fMetaDirty = false;
4943 }
4944 }
4945 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4946 (pRaw->uFlags & VDISKRAW_DISK) ?
4947 "fullDevice" : "partitionedDevice");
4948 if (RT_FAILURE(rc))
4949 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4950 return rc;
4951}
4952/**
4953 * Internal: create a regular (i.e. file-backed) VMDK image.
4954 */
4955static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
4956 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
4957 unsigned uPercentStart, unsigned uPercentSpan)
4958{
4959 int rc = VINF_SUCCESS;
4960 unsigned cExtents = 1;
4961 uint64_t cbOffset = 0;
4962 uint64_t cbRemaining = cbSize;
4963 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4964 {
4965 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
4966 /* Do proper extent computation: need one smaller extent if the total
4967 * size isn't evenly divisible by the split size. */
4968 if (cbSize % VMDK_2G_SPLIT_SIZE)
4969 cExtents++;
4970 }
4971 rc = vmdkCreateExtents(pImage, cExtents);
4972 if (RT_FAILURE(rc))
4973 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4974 /* Basename strings needed for constructing the extent names. */
4975 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4976 AssertPtr(pszBasenameSubstr);
4977 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4978 /* Create separate descriptor file if necessary. */
4979 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
4980 {
4981 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
4982 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4983 true /* fCreate */));
4984 if (RT_FAILURE(rc))
4985 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
4986 }
4987 else
4988 pImage->pFile = NULL;
4989 /* Set up all extents. */
4990 for (unsigned i = 0; i < cExtents; i++)
4991 {
4992 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4993 uint64_t cbExtent = cbRemaining;
4994         /* Set up fullname/basename for extent description. Cannot use RTStrDup
4995          * for the basename, as it must be freeable with RTMemTmpFree (other
4996          * code paths allocate the basename with RTMemTmpAlloc, so a single
4997          * free path is used everywhere). */
4998 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4999 {
5000 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5001 if (!pszBasename)
5002 return VERR_NO_MEMORY;
5003 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5004 pExtent->pszBasename = pszBasename;
5005 }
5006 else
5007 {
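            /* Derived extent names (illustrative, for an image called "disk.vmdk"):
             * "disk-flat.vmdk" for a single fixed extent, "disk-f001.vmdk" etc. for
             * split fixed extents, and "disk-s001.vmdk" etc. for split sparse ones. */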
5008 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5009 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5010 RTPathStripSuffix(pszBasenameBase);
5011 char *pszTmp;
5012 size_t cbTmp;
5013 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5014 {
5015 if (cExtents == 1)
5016 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5017 pszBasenameSuff);
5018 else
5019 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5020 i+1, pszBasenameSuff);
5021 }
5022 else
5023 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5024 pszBasenameSuff);
5025 RTStrFree(pszBasenameBase);
5026 if (!pszTmp)
5027 return VERR_NO_STR_MEMORY;
5028 cbTmp = strlen(pszTmp) + 1;
5029 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5030 if (!pszBasename)
5031 {
5032 RTStrFree(pszTmp);
5033 return VERR_NO_MEMORY;
5034 }
5035 memcpy(pszBasename, pszTmp, cbTmp);
5036 RTStrFree(pszTmp);
5037 pExtent->pszBasename = pszBasename;
5038 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5039 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5040 }
5041 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5042 if (!pszBasedirectory)
5043 return VERR_NO_STR_MEMORY;
5044 RTPathStripFilename(pszBasedirectory);
5045 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5046 RTStrFree(pszBasedirectory);
5047 if (!pszFullname)
5048 return VERR_NO_STR_MEMORY;
5049 pExtent->pszFullname = pszFullname;
5050 /* Create file for extent. */
5051 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5052 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5053 true /* fCreate */));
5054 if (RT_FAILURE(rc))
5055 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5056 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5057 {
5058 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5059 0 /* fFlags */, pIfProgress,
5060 uPercentStart + cbOffset * uPercentSpan / cbSize,
5061 cbExtent * uPercentSpan / cbSize);
5062 if (RT_FAILURE(rc))
5063 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5064 }
5065 /* Place descriptor file information (where integrated). */
5066 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5067 {
5068 pExtent->uDescriptorSector = 1;
5069 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5070 /* The descriptor is part of the (only) extent. */
5071 pExtent->pDescData = pImage->pDescData;
5072 pImage->pDescData = NULL;
5073 }
5074 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5075 {
5076 uint64_t cSectorsPerGDE, cSectorsPerGD;
5077 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5078 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5079 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5080 pExtent->cGTEntries = 512;
5081 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5082 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5083 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5084 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
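            /* With 64 KiB grains and 512 entries per grain table, each grain
             * table covers 512 * 64 KiB = 32 MiB of the extent. */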
5085 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5086 {
5087 /* The spec says version is 1 for all VMDKs, but the vast
5088 * majority of streamOptimized VMDKs actually contain
5089 * version 3 - so go with the majority. Both are accepted. */
5090 pExtent->uVersion = 3;
5091 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5092 }
5093 }
5094 else
5095 {
5096 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5097 pExtent->enmType = VMDKETYPE_VMFS;
5098 else
5099 pExtent->enmType = VMDKETYPE_FLAT;
5100 }
5101 pExtent->enmAccess = VMDKACCESS_READWRITE;
5102 pExtent->fUncleanShutdown = true;
5103 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5104 pExtent->uSectorOffset = 0;
5105 pExtent->fMetaDirty = true;
5106 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5107 {
5108 /* fPreAlloc should never be false because VMware can't use such images. */
5109 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5110 RT_MAX( pExtent->uDescriptorSector
5111 + pExtent->cDescriptorSectors,
5112 1),
5113 true /* fPreAlloc */);
5114 if (RT_FAILURE(rc))
5115 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5116 }
5117 cbOffset += cbExtent;
5118 if (RT_SUCCESS(rc))
5119 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5120 cbRemaining -= cbExtent;
5121 }
5122 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5123 {
5124         /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
5125 * controller type is set in an image. */
5126 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5127 if (RT_FAILURE(rc))
5128 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5129 }
5130 const char *pszDescType = NULL;
5131 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5132 {
5133 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5134 pszDescType = "vmfs";
5135 else
5136 pszDescType = (cExtents == 1)
5137 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5138 }
5139 else
5140 {
5141 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5142 pszDescType = "streamOptimized";
5143 else
5144 {
5145 pszDescType = (cExtents == 1)
5146 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5147 }
5148 }
5149 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5150 pszDescType);
5151 if (RT_FAILURE(rc))
5152 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5153 return rc;
5154}
5155/**
5156 * Internal: Create a real stream optimized VMDK using only linear writes.
5157 */
5158static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5159{
5160 int rc = vmdkCreateExtents(pImage, 1);
5161 if (RT_FAILURE(rc))
5162 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5163 /* Basename strings needed for constructing the extent names. */
5164 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5165 AssertPtr(pszBasenameSubstr);
5166 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5167 /* No separate descriptor file. */
5168 pImage->pFile = NULL;
5169 /* Set up all extents. */
5170 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5171     /* Set up fullname/basename for extent description. Cannot use RTStrDup
5172      * for the basename, as it must be freeable with RTMemTmpFree (other
5173      * code paths allocate the basename with RTMemTmpAlloc, so a single
5174      * free path is used everywhere). */
5175 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5176 if (!pszBasename)
5177 return VERR_NO_MEMORY;
5178 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5179 pExtent->pszBasename = pszBasename;
5180 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5181 RTPathStripFilename(pszBasedirectory);
5182 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5183 RTStrFree(pszBasedirectory);
5184 if (!pszFullname)
5185 return VERR_NO_STR_MEMORY;
5186 pExtent->pszFullname = pszFullname;
5187 /* Create file for extent. Make it write only, no reading allowed. */
5188 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5189 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5190 true /* fCreate */)
5191 & ~RTFILE_O_READ);
5192 if (RT_FAILURE(rc))
5193 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5194 /* Place descriptor file information. */
5195 pExtent->uDescriptorSector = 1;
5196 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5197 /* The descriptor is part of the (only) extent. */
5198 pExtent->pDescData = pImage->pDescData;
5199 pImage->pDescData = NULL;
5200 uint64_t cSectorsPerGDE, cSectorsPerGD;
5201 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5202 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5203 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5204 pExtent->cGTEntries = 512;
5205 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5206 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5207 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5208 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5209 /* The spec says version is 1 for all VMDKs, but the vast
5210 * majority of streamOptimized VMDKs actually contain
5211 * version 3 - so go with the majority. Both are accepted. */
5212 pExtent->uVersion = 3;
5213 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5214 pExtent->fFooter = true;
5215 pExtent->enmAccess = VMDKACCESS_READONLY;
5216 pExtent->fUncleanShutdown = false;
5217 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5218 pExtent->uSectorOffset = 0;
5219 pExtent->fMetaDirty = true;
5220 /* Create grain directory, without preallocating it straight away. It will
5221 * be constructed on the fly when writing out the data and written when
5222 * closing the image. The end effect is that the full grain directory is
5223 * allocated, which is a requirement of the VMDK specs. */
5224 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5225 false /* fPreAlloc */);
5226 if (RT_FAILURE(rc))
5227 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5228 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5229 "streamOptimized");
5230 if (RT_FAILURE(rc))
5231 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5232 return rc;
5233}
5234/**
5235 * Initializes the UUID fields in the DDB.
5236 *
5237 * @returns VBox status code.
5238 * @param pImage The VMDK image instance.
5239 */
5240static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5241{
5242 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5243 if (RT_SUCCESS(rc))
5244 {
5245 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5246 if (RT_SUCCESS(rc))
5247 {
5248 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5249 &pImage->ModificationUuid);
5250 if (RT_SUCCESS(rc))
5251 {
5252 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5253 &pImage->ParentModificationUuid);
5254 if (RT_FAILURE(rc))
5255 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5256 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5257 }
5258 else
5259 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5260 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5261 }
5262 else
5263 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5264 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5265 }
5266 else
5267 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5268 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5269 return rc;
5270}
5271/**
5272 * Internal: The actual code for creating any VMDK variant currently in
5273 * existence on hosted environments.
5274 */
5275static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5276 unsigned uImageFlags, const char *pszComment,
5277 PCVDGEOMETRY pPCHSGeometry,
5278 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5279 PVDINTERFACEPROGRESS pIfProgress,
5280 unsigned uPercentStart, unsigned uPercentSpan)
5281{
5282 pImage->uImageFlags = uImageFlags;
5283 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5284 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5285 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5286 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5287 &pImage->Descriptor);
5288 if (RT_SUCCESS(rc))
5289 {
5290 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5291 {
5292 /* Raw disk image (includes raw partition). */
5293 PVDISKRAW pRaw = NULL;
5294 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5295 if (RT_FAILURE(rc))
5296                 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not get raw descriptor for '%s'"), pImage->pszFilename);
5297 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5298 vmdkRawDescFree(pRaw);
5299 }
5300 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5301 {
5302 /* Stream optimized sparse image (monolithic). */
5303 rc = vmdkCreateStreamImage(pImage, cbSize);
5304 }
5305 else
5306 {
5307 /* Regular fixed or sparse image (monolithic or split). */
5308 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5309 pIfProgress, uPercentStart,
5310 uPercentSpan * 95 / 100);
5311 }
5312 if (RT_SUCCESS(rc))
5313 {
5314 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5315 pImage->cbSize = cbSize;
5316 for (unsigned i = 0; i < pImage->cExtents; i++)
5317 {
5318 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5319 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5320 pExtent->cNominalSectors, pExtent->enmType,
5321 pExtent->pszBasename, pExtent->uSectorOffset);
5322 if (RT_FAILURE(rc))
5323 {
5324 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5325 break;
5326 }
5327 }
5328 if (RT_SUCCESS(rc))
5329 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5330 if ( RT_SUCCESS(rc)
5331 && pPCHSGeometry->cCylinders != 0
5332 && pPCHSGeometry->cHeads != 0
5333 && pPCHSGeometry->cSectors != 0)
5334 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5335 if ( RT_SUCCESS(rc)
5336 && pLCHSGeometry->cCylinders != 0
5337 && pLCHSGeometry->cHeads != 0
5338 && pLCHSGeometry->cSectors != 0)
5339 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5340 pImage->LCHSGeometry = *pLCHSGeometry;
5341 pImage->PCHSGeometry = *pPCHSGeometry;
5342 pImage->ImageUuid = *pUuid;
5343 RTUuidClear(&pImage->ParentUuid);
5344 RTUuidClear(&pImage->ModificationUuid);
5345 RTUuidClear(&pImage->ParentModificationUuid);
5346 if (RT_SUCCESS(rc))
5347 rc = vmdkCreateImageDdbUuidsInit(pImage);
5348 if (RT_SUCCESS(rc))
5349 rc = vmdkAllocateGrainTableCache(pImage);
5350 if (RT_SUCCESS(rc))
5351 {
5352 rc = vmdkSetImageComment(pImage, pszComment);
5353 if (RT_FAILURE(rc))
5354 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5355 }
5356 if (RT_SUCCESS(rc))
5357 {
5358 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5359 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5360 {
5361 /* streamOptimized is a bit special, we cannot trigger the flush
5362 * until all data has been written. So we write the necessary
5363 * information explicitly. */
5364 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5365 - pImage->Descriptor.aLines[0], 512));
5366 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5367 if (RT_SUCCESS(rc))
5368 {
5369 rc = vmdkWriteDescriptor(pImage, NULL);
5370 if (RT_FAILURE(rc))
5371 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5372 }
5373 else
5374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5375 }
5376 else
5377 rc = vmdkFlushImage(pImage, NULL);
5378 }
5379 }
5380 }
5381 else
5382 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5383 if (RT_SUCCESS(rc))
5384 {
5385 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5386 pImage->RegionList.fFlags = 0;
5387 pImage->RegionList.cRegions = 1;
5388 pRegion->offRegion = 0; /* Disk start. */
5389 pRegion->cbBlock = 512;
5390 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5391 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5392 pRegion->cbData = 512;
5393 pRegion->cbMetadata = 0;
5394 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5395 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5396 }
5397 else
5398 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5399 return rc;
5400}
5401/**
5402 * Internal: Update image comment.
5403 */
5404static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5405{
5406 char *pszCommentEncoded = NULL;
5407 if (pszComment)
5408 {
5409 pszCommentEncoded = vmdkEncodeString(pszComment);
5410 if (!pszCommentEncoded)
5411 return VERR_NO_MEMORY;
5412 }
5413 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5414 "ddb.comment", pszCommentEncoded);
5415 if (pszCommentEncoded)
5416 RTStrFree(pszCommentEncoded);
5417 if (RT_FAILURE(rc))
5418 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5419 return VINF_SUCCESS;
5420}
5421/**
5422 * Internal. Clear the grain table buffer for real stream optimized writing.
5423 */
5424static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5425{
5426 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5427 for (uint32_t i = 0; i < cCacheLines; i++)
5428 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5429 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5430}
5431/**
5432 * Internal. Flush the grain table buffer for real stream optimized writing.
5433 */
5434static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5435 uint32_t uGDEntry)
5436{
5437 int rc = VINF_SUCCESS;
5438 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5439 /* VMware does not write out completely empty grain tables in the case
5440 * of streamOptimized images, which according to my interpretation of
5441 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5442      * handle it without problems, do it the same way and save some bytes. */
5443 bool fAllZero = true;
5444 for (uint32_t i = 0; i < cCacheLines; i++)
5445 {
5446         /* Check whether this cache line of the grain table contains any
5447          * allocated grains; a completely empty grain table is not written. */
5448 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5449 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5450 if (*pGTTmp)
5451 {
5452 fAllZero = false;
5453 break;
5454 }
5455 if (!fAllZero)
5456 break;
5457 }
5458 if (fAllZero)
5459 return VINF_SUCCESS;
5460 uint64_t uFileOffset = pExtent->uAppendPosition;
5461 if (!uFileOffset)
5462 return VERR_INTERNAL_ERROR;
5463 /* Align to sector, as the previous write could have been any size. */
5464 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5465 /* Grain table marker. */
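    /* A marker occupies one 512 byte sector: uSector holds the size of the
     * data that follows (in sectors) and uType identifies the payload (a grain
     * table here); the little-endian grain table is written right after it. */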
5466 uint8_t aMarker[512];
5467 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5468 memset(pMarker, '\0', sizeof(aMarker));
5469 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5470 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5471 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5472 aMarker, sizeof(aMarker));
5473 AssertRC(rc);
5474 uFileOffset += 512;
5475 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5476 return VERR_INTERNAL_ERROR;
5477 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5478 for (uint32_t i = 0; i < cCacheLines; i++)
5479 {
5480 /* Convert the grain table to little endian in place, as it will not
5481 * be used at all after this function has been called. */
5482 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5483 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5484 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5485 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5486 &pImage->pGTCache->aGTCache[i].aGTData[0],
5487 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5488 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5489 if (RT_FAILURE(rc))
5490 break;
5491 }
5492 Assert(!(uFileOffset % 512));
5493 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5494 return rc;
5495}
5496/**
5497 * Internal. Free all allocated space for representing an image, and optionally
5498 * delete the image from disk.
5499 */
5500static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5501{
5502 int rc = VINF_SUCCESS;
5503 /* Freeing a never allocated image (e.g. because the open failed) is
5504 * not signalled as an error. After all nothing bad happens. */
5505 if (pImage)
5506 {
5507 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5508 {
5509 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5510 {
5511 /* Check if all extents are clean. */
5512 for (unsigned i = 0; i < pImage->cExtents; i++)
5513 {
5514 Assert(!pImage->pExtents[i].fUncleanShutdown);
5515 }
5516 }
5517 else
5518 {
5519 /* Mark all extents as clean. */
5520 for (unsigned i = 0; i < pImage->cExtents; i++)
5521 {
5522 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
5523 && pImage->pExtents[i].fUncleanShutdown)
5524 {
5525 pImage->pExtents[i].fUncleanShutdown = false;
5526 pImage->pExtents[i].fMetaDirty = true;
5527 }
5528 /* From now on it's not safe to append any more data. */
5529 pImage->pExtents[i].uAppendPosition = 0;
5530 }
5531 }
5532 }
5533 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5534 {
5535 /* No need to write any pending data if the file will be deleted
5536 * or if the new file wasn't successfully created. */
5537 if ( !fDelete && pImage->pExtents
5538 && pImage->pExtents[0].cGTEntries
5539 && pImage->pExtents[0].uAppendPosition)
5540 {
5541 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5542 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5543 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5544 AssertRC(rc);
5545 vmdkStreamClearGT(pImage, pExtent);
5546 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
5547 {
5548 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5549 AssertRC(rc);
5550 }
5551 uint64_t uFileOffset = pExtent->uAppendPosition;
5552 if (!uFileOffset)
5553 return VERR_INTERNAL_ERROR;
5554 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5555 /* From now on it's not safe to append any more data. */
5556 pExtent->uAppendPosition = 0;
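            /* Closing sequence written below: grain directory marker + grain
             * directory, footer marker + sparse header footer, and finally an
             * all-zero end-of-stream marker. */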
5557 /* Grain directory marker. */
5558 uint8_t aMarker[512];
5559 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5560 memset(pMarker, '\0', sizeof(aMarker));
5561 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
5562 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
5563 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5564 aMarker, sizeof(aMarker));
5565 AssertRC(rc);
5566 uFileOffset += 512;
5567 /* Write grain directory in little endian style. The array will
5568 * not be used after this, so convert in place. */
5569 uint32_t *pGDTmp = pExtent->pGD;
5570 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
5571 *pGDTmp = RT_H2LE_U32(*pGDTmp);
5572 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5573 uFileOffset, pExtent->pGD,
5574 pExtent->cGDEntries * sizeof(uint32_t));
5575 AssertRC(rc);
5576 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
5577 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
5578 uFileOffset = RT_ALIGN_64( uFileOffset
5579 + pExtent->cGDEntries * sizeof(uint32_t),
5580 512);
5581 /* Footer marker. */
5582 memset(pMarker, '\0', sizeof(aMarker));
5583 pMarker->uSector = VMDK_BYTE2SECTOR(512);
5584 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
5585 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5586 uFileOffset, aMarker, sizeof(aMarker));
5587 AssertRC(rc);
5588 uFileOffset += 512;
5589 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
5590 AssertRC(rc);
5591 uFileOffset += 512;
5592 /* End-of-stream marker. */
5593 memset(pMarker, '\0', sizeof(aMarker));
5594 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5595 uFileOffset, aMarker, sizeof(aMarker));
5596 AssertRC(rc);
5597 }
5598 }
5599 else if (!fDelete && fFlush)
5600 vmdkFlushImage(pImage, NULL);
5601 if (pImage->pExtents != NULL)
5602 {
5603 for (unsigned i = 0 ; i < pImage->cExtents; i++)
5604 {
5605 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
5606 if (RT_SUCCESS(rc))
5607                     rc = rc2; /* Propagate any error when closing the file. */
5608 }
5609 RTMemFree(pImage->pExtents);
5610 pImage->pExtents = NULL;
5611 }
5612 pImage->cExtents = 0;
5613 if (pImage->pFile != NULL)
5614 {
5615 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
5616 if (RT_SUCCESS(rc))
5617                 rc = rc2; /* Propagate any error when closing the file. */
5618 }
5619 int rc2 = vmdkFileCheckAllClose(pImage);
5620 if (RT_SUCCESS(rc))
5621             rc = rc2; /* Propagate any error when closing the file. */
5622 if (pImage->pGTCache)
5623 {
5624 RTMemFree(pImage->pGTCache);
5625 pImage->pGTCache = NULL;
5626 }
5627 if (pImage->pDescData)
5628 {
5629 RTMemFree(pImage->pDescData);
5630 pImage->pDescData = NULL;
5631 }
5632 }
5633 LogFlowFunc(("returns %Rrc\n", rc));
5634 return rc;
5635}
5636/**
5637 * Internal. Flush image data (and metadata) to disk.
5638 */
5639static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
5640{
5641 PVMDKEXTENT pExtent;
5642 int rc = VINF_SUCCESS;
5643 /* Update descriptor if changed. */
5644 if (pImage->Descriptor.fDirty)
5645 rc = vmdkWriteDescriptor(pImage, pIoCtx);
5646 if (RT_SUCCESS(rc))
5647 {
5648 for (unsigned i = 0; i < pImage->cExtents; i++)
5649 {
5650 pExtent = &pImage->pExtents[i];
5651 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
5652 {
5653 switch (pExtent->enmType)
5654 {
5655 case VMDKETYPE_HOSTED_SPARSE:
5656 if (!pExtent->fFooter)
5657 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
5658 else
5659 {
5660 uint64_t uFileOffset = pExtent->uAppendPosition;
5661 /* Simply skip writing anything if the streamOptimized
5662 * image hasn't been just created. */
5663 if (!uFileOffset)
5664 break;
5665 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5666 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
5667 uFileOffset, pIoCtx);
5668 }
5669 break;
5670 case VMDKETYPE_VMFS:
5671 case VMDKETYPE_FLAT:
5672 /* Nothing to do. */
5673 break;
5674 case VMDKETYPE_ZERO:
5675 default:
5676 AssertMsgFailed(("extent with type %d marked as dirty\n",
5677 pExtent->enmType));
5678 break;
5679 }
5680 }
5681 if (RT_FAILURE(rc))
5682 break;
5683 switch (pExtent->enmType)
5684 {
5685 case VMDKETYPE_HOSTED_SPARSE:
5686 case VMDKETYPE_VMFS:
5687 case VMDKETYPE_FLAT:
5688 /** @todo implement proper path absolute check. */
5689 if ( pExtent->pFile != NULL
5690 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5691 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
5692 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
5693 NULL, NULL);
5694 break;
5695 case VMDKETYPE_ZERO:
5696 /* No need to do anything for this extent. */
5697 break;
5698 default:
5699 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
5700 break;
5701 }
5702 }
5703 }
5704 return rc;
5705}
5706/**
5707 * Internal. Find extent corresponding to the sector number in the disk.
5708 */
5709static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
5710 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
5711{
5712 PVMDKEXTENT pExtent = NULL;
5713 int rc = VINF_SUCCESS;
5714 for (unsigned i = 0; i < pImage->cExtents; i++)
5715 {
5716 if (offSector < pImage->pExtents[i].cNominalSectors)
5717 {
5718 pExtent = &pImage->pExtents[i];
5719 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
5720 break;
5721 }
5722 offSector -= pImage->pExtents[i].cNominalSectors;
5723 }
5724 if (pExtent)
5725 *ppExtent = pExtent;
5726 else
5727 rc = VERR_IO_SECTOR_NOT_FOUND;
5728 return rc;
5729}
5730/**
5731 * Internal. Hash function for placing the grain table hash entries.
5732 */
5733static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
5734 unsigned uExtent)
5735{
5736 /** @todo this hash function is quite simple, maybe use a better one which
5737 * scrambles the bits better. */
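    /* A collision is harmless for correctness: the cache-miss paths in
     * vmdkGetSector() and vmdkAllocGrainGTUpdate() simply reload the grain
     * table block from disk, so the hash only affects the hit rate. */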
5738 return (uSector + uExtent) % pCache->cEntries;
5739}
5740/**
5741 * Internal. Get sector number in the extent file from the relative sector
5742 * number in the extent.
5743 */
5744static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5745 PVMDKEXTENT pExtent, uint64_t uSector,
5746 uint64_t *puExtentSector)
5747{
5748 PVMDKGTCACHE pCache = pImage->pGTCache;
5749 uint64_t uGDIndex, uGTSector, uGTBlock;
5750 uint32_t uGTHash, uGTBlockIndex;
5751 PVMDKGTCACHEENTRY pGTCacheEntry;
5752 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5753 int rc;
5754 /* For newly created and readonly/sequentially opened streamOptimized
5755 * images this must be a no-op, as the grain directory is not there. */
5756 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5757 && pExtent->uAppendPosition)
5758 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5759 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
5760 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5761 {
5762 *puExtentSector = 0;
5763 return VINF_SUCCESS;
5764 }
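    /* Two-level lookup: the sector selects a grain directory entry, which
     * points to a grain table; the grain table entry then gives the absolute
     * sector of the grain data (0 = unallocated). Illustrative numbers only
     * (typical defaults, not read from this image): with 128 sectors per
     * grain and 512 GT entries, one GDE covers 128 * 512 = 65536 sectors, so
     * sector 100000 falls into GDE 1 and GT entry (100000 / 128) % 512 = 269. */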
5765 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5766 if (uGDIndex >= pExtent->cGDEntries)
5767 return VERR_OUT_OF_RANGE;
5768 uGTSector = pExtent->pGD[uGDIndex];
5769 if (!uGTSector)
5770 {
5771 /* There is no grain table referenced by this grain directory
5772 * entry. So there is absolutely no data in this area. */
5773 *puExtentSector = 0;
5774 return VINF_SUCCESS;
5775 }
5776 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5777 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5778 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5779 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5780 || pGTCacheEntry->uGTBlock != uGTBlock)
5781 {
5782 /* Cache miss, fetch data from disk. */
5783 PVDMETAXFER pMetaXfer;
5784 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5785 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5786 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5787 if (RT_FAILURE(rc))
5788 return rc;
5789 /* We can release the metadata transfer immediately. */
5790 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5791 pGTCacheEntry->uExtent = pExtent->uExtent;
5792 pGTCacheEntry->uGTBlock = uGTBlock;
5793 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5794 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5795 }
5796 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5797 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5798 if (uGrainSector)
5799 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5800 else
5801 *puExtentSector = 0;
5802 return VINF_SUCCESS;
5803}
5804/**
5805 * Internal. Writes the grain and also if necessary the grain tables.
5806 * Uses the grain table cache as a true grain table.
5807 */
5808static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5809 uint64_t uSector, PVDIOCTX pIoCtx,
5810 uint64_t cbWrite)
5811{
5812 uint32_t uGrain;
5813 uint32_t uGDEntry, uLastGDEntry;
5814 uint32_t cbGrain = 0;
5815 uint32_t uCacheLine, uCacheEntry;
5816 const void *pData;
5817 int rc;
5818 /* Very strict requirements: always write at least one full grain, with
5819 * proper alignment. Everything else would require reading of already
5820 * written data, which we don't support for obvious reasons. The only
5821 * exception is the last grain, and only if the image size specifies
5822 * that only some portion holds data. In any case the write must be
5823 * within the image limits, no "overshoot" allowed. */
5824 if ( cbWrite == 0
5825 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
5826 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
5827 || uSector % pExtent->cSectorsPerGrain
5828 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
5829 return VERR_INVALID_PARAMETER;
5830 /* Clip write range to at most the rest of the grain. */
5831 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
5832 /* Do not allow going back. */
5833 uGrain = uSector / pExtent->cSectorsPerGrain;
5834 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5835 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
5836 uGDEntry = uGrain / pExtent->cGTEntries;
5837 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
5838 if (uGrain < pExtent->uLastGrainAccess)
5839 return VERR_VD_VMDK_INVALID_WRITE;
5840 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
5841 * to allocate something, we also need to detect the situation ourselves. */
5842 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5843 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
5844 return VINF_SUCCESS;
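    /* Crossing into a new grain directory entry: the grain table for the
     * previous GD entry is complete, so flush it out, and emit empty grain
     * tables for any GD entries that were skipped entirely. */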
5845 if (uGDEntry != uLastGDEntry)
5846 {
5847 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5848 if (RT_FAILURE(rc))
5849 return rc;
5850 vmdkStreamClearGT(pImage, pExtent);
5851 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5852 {
5853 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5854 if (RT_FAILURE(rc))
5855 return rc;
5856 }
5857 }
5858 uint64_t uFileOffset;
5859 uFileOffset = pExtent->uAppendPosition;
5860 if (!uFileOffset)
5861 return VERR_INTERNAL_ERROR;
5862 /* Align to sector, as the previous write could have been any size. */
5863 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5864 /* Paranoia check: extent type, grain table buffer presence and
5865 * grain table buffer space. Also grain table entry must be clear. */
5866 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5867 || !pImage->pGTCache
5868 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5869 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5870 return VERR_INTERNAL_ERROR;
5871 /* Update grain table entry. */
5872 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5873 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5874 {
5875 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
5876 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5877 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5878 pData = pExtent->pvGrain;
5879 }
5880 else
5881 {
5882 RTSGSEG Segment;
5883 unsigned cSegments = 1;
5884 size_t cbSeg = 0;
5885 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
5886 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5887 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5888 pData = Segment.pvSeg;
5889 }
5890 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5891 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5892 uSector, &cbGrain);
5893 if (RT_FAILURE(rc))
5894 {
5895 pExtent->uGrainSectorAbs = 0;
5896 AssertRC(rc);
5897 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5898 }
5899 pExtent->uLastGrainAccess = uGrain;
5900 pExtent->uAppendPosition += cbGrain;
5901 return rc;
5902}
5903/**
5904 * Internal: Updates the grain table during grain allocation.
5905 */
5906static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
5907 PVMDKGRAINALLOCASYNC pGrainAlloc)
5908{
5909 int rc = VINF_SUCCESS;
5910 PVMDKGTCACHE pCache = pImage->pGTCache;
5911 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5912 uint32_t uGTHash, uGTBlockIndex;
5913 uint64_t uGTSector, uRGTSector, uGTBlock;
5914 uint64_t uSector = pGrainAlloc->uSector;
5915 PVMDKGTCACHEENTRY pGTCacheEntry;
5916 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5917 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5918 uGTSector = pGrainAlloc->uGTSector;
5919 uRGTSector = pGrainAlloc->uRGTSector;
5920 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
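    /* The metadata I/O below may complete asynchronously. Every transfer that
     * returns VERR_VD_ASYNC_IO_IN_PROGRESS bumps cIoXfersPending; the
     * completion callback vmdkAllocGrainComplete() re-invokes this function
     * (via fGTUpdateNeeded) once the grain table read has finished. */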
5921 /* Update the grain table (and the cache). */
5922 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5923 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5924 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5925 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5926 || pGTCacheEntry->uGTBlock != uGTBlock)
5927 {
5928 /* Cache miss, fetch data from disk. */
5929 LogFlow(("Cache miss, fetch data from disk\n"));
5930 PVDMETAXFER pMetaXfer = NULL;
5931 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5932 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5933 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5934 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
5935 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5936 {
5937 pGrainAlloc->cIoXfersPending++;
5938 pGrainAlloc->fGTUpdateNeeded = true;
5939 /* Leave early, we will be called again after the read completed. */
5940 LogFlowFunc(("Metadata read in progress, leaving\n"));
5941 return rc;
5942 }
5943 else if (RT_FAILURE(rc))
5944 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5945 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5946 pGTCacheEntry->uExtent = pExtent->uExtent;
5947 pGTCacheEntry->uGTBlock = uGTBlock;
5948 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5949 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5950 }
5951 else
5952 {
5953 /* Cache hit. Convert grain table block back to disk format, otherwise
5954 * the code below will write garbage for all but the updated entry. */
5955 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5956 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5957 }
5958 pGrainAlloc->fGTUpdateNeeded = false;
5959 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5960 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5961 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5962 /* Update grain table on disk. */
5963 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5964 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5965 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5966 vmdkAllocGrainComplete, pGrainAlloc);
5967 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5968 pGrainAlloc->cIoXfersPending++;
5969 else if (RT_FAILURE(rc))
5970 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5971 if (pExtent->pRGD)
5972 {
5973 /* Update backup grain table on disk. */
5974 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
5975 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5976 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5977 vmdkAllocGrainComplete, pGrainAlloc);
5978 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5979 pGrainAlloc->cIoXfersPending++;
5980 else if (RT_FAILURE(rc))
5981 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5982 }
5983 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5984 return rc;
5985}
5986/**
5987 * Internal - complete the grain allocation by updating disk grain table if required.
5988 */
5989static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5990{
5991 RT_NOREF1(rcReq);
5992 int rc = VINF_SUCCESS;
5993 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5994 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5995 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5996 pBackendData, pIoCtx, pvUser, rcReq));
5997 pGrainAlloc->cIoXfersPending--;
5998 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5999 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6000 if (!pGrainAlloc->cIoXfersPending)
6001 {
6002 /* Grain allocation completed. */
6003 RTMemFree(pGrainAlloc);
6004 }
6005 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6006 return rc;
6007}
6008/**
6009 * Internal. Allocates a new grain table (if necessary).
6010 */
6011static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6012 uint64_t uSector, uint64_t cbWrite)
6013{
6014 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6015 uint64_t uGDIndex, uGTSector, uRGTSector;
6016 uint64_t uFileOffset;
6017 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6018 int rc;
6019 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6020 pCache, pExtent, pIoCtx, uSector, cbWrite));
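    /* Overall flow: if the grain directory entry has no grain table yet,
     * append a zeroed grain table (and its redundant copy, if present) and
     * link it from the grain directory. Then append the grain data itself
     * (compressed for streamOptimized images) and finally update the grain
     * table entry via vmdkAllocGrainGTUpdate(). */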
6021 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6022 if (!pGrainAlloc)
6023 return VERR_NO_MEMORY;
6024 pGrainAlloc->pExtent = pExtent;
6025 pGrainAlloc->uSector = uSector;
6026 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6027 if (uGDIndex >= pExtent->cGDEntries)
6028 {
6029 RTMemFree(pGrainAlloc);
6030 return VERR_OUT_OF_RANGE;
6031 }
6032 uGTSector = pExtent->pGD[uGDIndex];
6033 if (pExtent->pRGD)
6034 uRGTSector = pExtent->pRGD[uGDIndex];
6035 else
6036 uRGTSector = 0; /**< avoid compiler warning */
6037 if (!uGTSector)
6038 {
6039 LogFlow(("Allocating new grain table\n"));
6040 /* There is no grain table referenced by this grain directory
6041 * entry. So there is absolutely no data in this area. Allocate
6042 * a new grain table and put the reference to it in the GDs. */
6043 uFileOffset = pExtent->uAppendPosition;
6044 if (!uFileOffset)
6045 {
6046 RTMemFree(pGrainAlloc);
6047 return VERR_INTERNAL_ERROR;
6048 }
6049 Assert(!(uFileOffset % 512));
6050 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6051 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6052 /* Normally the grain table is preallocated for hosted sparse extents
6053 * that support more than 32 bit sector numbers. So this shouldn't
6054 * ever happen on a valid extent. */
6055 if (uGTSector > UINT32_MAX)
6056 {
6057 RTMemFree(pGrainAlloc);
6058 return VERR_VD_VMDK_INVALID_HEADER;
6059 }
6060 /* Write grain table by writing the required number of grain table
6061 * cache chunks. Allocate memory dynamically here or we flood the
6062 * metadata cache with very small entries. */
6063 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6064 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6065 if (!paGTDataTmp)
6066 {
6067 RTMemFree(pGrainAlloc);
6068 return VERR_NO_MEMORY;
6069 }
6070 memset(paGTDataTmp, '\0', cbGTDataTmp);
6071 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6072 VMDK_SECTOR2BYTE(uGTSector),
6073 paGTDataTmp, cbGTDataTmp, pIoCtx,
6074 vmdkAllocGrainComplete, pGrainAlloc);
6075 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6076 pGrainAlloc->cIoXfersPending++;
6077 else if (RT_FAILURE(rc))
6078 {
6079 RTMemTmpFree(paGTDataTmp);
6080 RTMemFree(pGrainAlloc);
6081 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6082 }
6083 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6084 + cbGTDataTmp, 512);
6085 if (pExtent->pRGD)
6086 {
6087 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6088 uFileOffset = pExtent->uAppendPosition;
6089 if (!uFileOffset)
6090 return VERR_INTERNAL_ERROR;
6091 Assert(!(uFileOffset % 512));
6092 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6093 /* Normally the redundant grain table is preallocated for hosted
6094 * sparse extents that support more than 32 bit sector numbers. So
6095 * this shouldn't ever happen on a valid extent. */
6096 if (uRGTSector > UINT32_MAX)
6097 {
6098 RTMemTmpFree(paGTDataTmp);
6099 return VERR_VD_VMDK_INVALID_HEADER;
6100 }
6101 /* Write grain table by writing the required number of grain table
6102 * cache chunks. Allocate memory dynamically here or we flood the
6103 * metadata cache with very small entries. */
6104 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6105 VMDK_SECTOR2BYTE(uRGTSector),
6106 paGTDataTmp, cbGTDataTmp, pIoCtx,
6107 vmdkAllocGrainComplete, pGrainAlloc);
6108 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6109 pGrainAlloc->cIoXfersPending++;
6110 else if (RT_FAILURE(rc))
6111 {
6112 RTMemTmpFree(paGTDataTmp);
6113 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6114 }
6115 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6116 }
6117 RTMemTmpFree(paGTDataTmp);
6118 /* Update the grain directory on disk (doing it before writing the
6119 * grain table would result in a garbled extent if the operation is
6120 * aborted for some reason; otherwise the worst that can happen is
6121 * some unused sectors in the extent). */
6122 uint32_t uGTSectorLE = RT_H2LE_U32((uint32_t)uGTSector); /* fits, checked against UINT32_MAX above */
6123 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6124 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6125 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6126 vmdkAllocGrainComplete, pGrainAlloc);
6127 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6128 pGrainAlloc->cIoXfersPending++;
6129 else if (RT_FAILURE(rc))
6130 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6131 if (pExtent->pRGD)
6132 {
6133 uint32_t uRGTSectorLE = RT_H2LE_U32((uint32_t)uRGTSector);
6134 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6135 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6136 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6137 vmdkAllocGrainComplete, pGrainAlloc);
6138 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6139 pGrainAlloc->cIoXfersPending++;
6140 else if (RT_FAILURE(rc))
6141 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6142 }
6143 /* As the final step update the in-memory copy of the GDs. */
6144 pExtent->pGD[uGDIndex] = uGTSector;
6145 if (pExtent->pRGD)
6146 pExtent->pRGD[uGDIndex] = uRGTSector;
6147 }
6148 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6149 pGrainAlloc->uGTSector = uGTSector;
6150 pGrainAlloc->uRGTSector = uRGTSector;
6151 uFileOffset = pExtent->uAppendPosition;
6152 if (!uFileOffset)
6153 return VERR_INTERNAL_ERROR;
6154 Assert(!(uFileOffset % 512));
6155 pGrainAlloc->uGrainOffset = uFileOffset;
6156 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6157 {
6158 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6159 ("Accesses to stream optimized images must be synchronous\n"),
6160 VERR_INVALID_STATE);
6161 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6162 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6163 /* Invalidate cache, just in case some code incorrectly allows mixing
6164 * of reads and writes. Normally shouldn't be needed. */
6165 pExtent->uGrainSectorAbs = 0;
6166 /* Write compressed data block and the markers. */
6167 uint32_t cbGrain = 0;
6168 size_t cbSeg = 0;
6169 RTSGSEG Segment;
6170 unsigned cSegments = 1;
6171 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6172 &cSegments, cbWrite);
6173 Assert(cbSeg == cbWrite);
6174 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6175 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6176 if (RT_FAILURE(rc))
6177 {
6178 AssertRC(rc);
6179 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6180 }
6181 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6182 pExtent->uAppendPosition += cbGrain;
6183 }
6184 else
6185 {
6186 /* Write the data. Always a full grain, or we're in big trouble. */
6187 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6188 uFileOffset, pIoCtx, cbWrite,
6189 vmdkAllocGrainComplete, pGrainAlloc);
6190 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6191 pGrainAlloc->cIoXfersPending++;
6192 else if (RT_FAILURE(rc))
6193 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6194 pExtent->uAppendPosition += cbWrite;
6195 }
6196 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6197 if (!pGrainAlloc->cIoXfersPending)
6198 {
6199 /* Grain allocation completed. */
6200 RTMemFree(pGrainAlloc);
6201 }
6202 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6203 return rc;
6204}
6205/**
6206 * Internal. Reads the contents by sequentially going over the compressed
6207 * grains (hoping that they are in sequence).
6208 */
6209static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6210 uint64_t uSector, PVDIOCTX pIoCtx,
6211 uint64_t cbRead)
6212{
6213 int rc;
6214 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6215 pImage, pExtent, uSector, pIoCtx, cbRead));
6216 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6217 ("Async I/O not supported for sequential stream optimized images\n"),
6218 VERR_INVALID_STATE);
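    /* A streamOptimized extent is a sequence of 512-byte aligned markers:
     * compressed grain markers (cbSize != 0, uSector = start LBA) followed by
     * the compressed data, and metadata markers (cbSize == 0) whose uType
     * identifies grain tables, the grain directory, the footer or the end of
     * stream. The loop below skips metadata markers and decompresses the
     * first grain at or after the requested sector. */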
6219 /* Do not allow going back. */
6220 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6221 if (uGrain < pExtent->uLastGrainAccess)
6222 return VERR_VD_VMDK_INVALID_STATE;
6223 pExtent->uLastGrainAccess = uGrain;
6224 /* After a previous error do not attempt to recover, as it would need
6225 * seeking (in the general case backwards, which is forbidden). */
6226 if (!pExtent->uGrainSectorAbs)
6227 return VERR_VD_VMDK_INVALID_STATE;
6228 /* Check if we need to read something from the image or if what we have
6229 * in the buffer is good to fulfill the request. */
6230 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6231 {
6232 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6233 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6234 /* Get the marker from the next data block - and skip everything which
6235 * is not a compressed grain. If it's a compressed grain which is for
6236 * the requested sector (or after), read it. */
6237 VMDKMARKER Marker;
6238 do
6239 {
6240 RT_ZERO(Marker);
6241 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6242 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6243 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6244 if (RT_FAILURE(rc))
6245 return rc;
6246 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6247 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6248 if (Marker.cbSize == 0)
6249 {
6250 /* A marker for something else than a compressed grain. */
6251 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6252 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6253 + RT_UOFFSETOF(VMDKMARKER, uType),
6254 &Marker.uType, sizeof(Marker.uType));
6255 if (RT_FAILURE(rc))
6256 return rc;
6257 Marker.uType = RT_LE2H_U32(Marker.uType);
6258 switch (Marker.uType)
6259 {
6260 case VMDK_MARKER_EOS:
6261 uGrainSectorAbs++;
6262 /* Read (or mostly skip) to the end of file. Uses the
6263 * Marker (LBA sector) as it is unused anyway. This
6264 * makes sure that really everything is read in the
6265 * success case. If this read fails it means the image
6266 * is truncated, but this is harmless so ignore. */
6267 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6268 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6269 + 511,
6270 &Marker.uSector, 1);
6271 break;
6272 case VMDK_MARKER_GT:
6273 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6274 break;
6275 case VMDK_MARKER_GD:
6276 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6277 break;
6278 case VMDK_MARKER_FOOTER:
6279 uGrainSectorAbs += 2;
6280 break;
6281 case VMDK_MARKER_UNSPECIFIED:
6282 /* Skip over the contents of the unspecified marker
6283 * type 4 which exists in some vSphere-created files. */
6284 /** @todo figure out what the payload means. */
6285 uGrainSectorAbs += 1;
6286 break;
6287 default:
6288 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6289 pExtent->uGrainSectorAbs = 0;
6290 return VERR_VD_VMDK_INVALID_STATE;
6291 }
6292 pExtent->cbGrainStreamRead = 0;
6293 }
6294 else
6295 {
6296 /* A compressed grain marker. If it is at/after what we're
6297 * interested in read and decompress data. */
6298 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6299 {
6300 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6301 continue;
6302 }
6303 uint64_t uLBA = 0;
6304 uint32_t cbGrainStreamRead = 0;
6305 rc = vmdkFileInflateSync(pImage, pExtent,
6306 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6307 pExtent->pvGrain,
6308 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6309 &Marker, &uLBA, &cbGrainStreamRead);
6310 if (RT_FAILURE(rc))
6311 {
6312 pExtent->uGrainSectorAbs = 0;
6313 return rc;
6314 }
6315 if ( pExtent->uGrain
6316 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6317 {
6318 pExtent->uGrainSectorAbs = 0;
6319 return VERR_VD_VMDK_INVALID_STATE;
6320 }
6321 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6322 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6323 break;
6324 }
6325 } while (Marker.uType != VMDK_MARKER_EOS);
6326 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6327 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6328 {
6329 pExtent->uGrain = UINT32_MAX;
6330 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6331 * the next read would try to get more data, and we're at EOF. */
6332 pExtent->cbGrainStreamRead = 1;
6333 }
6334 }
6335 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6336 {
6337 /* The next data block we have is not for this area, so just return
6338 * that there is no data. */
6339 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6340 return VERR_VD_BLOCK_FREE;
6341 }
6342 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6343 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6344 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6345 cbRead);
6346 LogFlowFunc(("returns VINF_SUCCESS\n"));
6347 return VINF_SUCCESS;
6348}
6349/**
6350 * Replaces a fragment of a string with the specified string.
6351 *
6352 * @returns Pointer to the allocated UTF-8 string.
6353 * @param pszWhere UTF-8 string to search in.
6354 * @param pszWhat UTF-8 string to search for.
6355 * @param pszByWhat UTF-8 string to replace the found string with.
6356 *
6357 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6358 * for updating the base name in the descriptor, the second is for
6359 * generating new filenames for extents. This code borked when
6360 * RTPathAbs started correcting the drive letter case on Windows:
6361 * strstr failed because the search string was not subjected to
6362 * RTPathAbs while pExtent->pszFullname was. I fixed this by
6363 * applying RTPathAbs in the places where it wasn't applied.
6364 *
6365 * However, this highlights some undocumented ASSUMPTIONS as well as
6366 * terrible shortcomings of the approach.
6367 *
6368 * Given the right filename, it may also screw up the descriptor. Take
6369 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance:
6370 * we'll be asked to replace "Test0" with something, no problem. Now
6371 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6372 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6373 * its bum. The descriptor string must be parsed and reconstructed,
6374 * the lazy strstr approach doesn't cut it.
6375 *
6376 * I'm also curious as to what would be the correct escaping of '"' in
6377 * the file name and how that is supposed to be handled, because it
6378 * needs to be handled, or such names must be rejected in several
6379 * places (maybe they are, I didn't check).
6380 *
6381 * When this function is used to replace the start of a path, I think
6382 * the assumption from the prep/setup code is that we kind of know
6383 * what we're working on (I could be wrong). However, using strstr
6384 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6385 * Especially on unix systems, weird stuff could happen if someone
6386 * unwittingly tinkers with the prep/setup code. What should really be
6387 * done here is using a new RTPathStartEx function that (via flags)
6388 * allows matching a partial final component and returns the length of
6389 * what it matched up (in case it skipped slashes and '.' components).
6390 *
6391 */
6392static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6393 const char *pszByWhat)
6394{
6395 AssertPtr(pszWhere);
6396 AssertPtr(pszWhat);
6397 AssertPtr(pszByWhat);
6398 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6399 if (!pszFoundStr)
6400 {
6401 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6402 return NULL;
6403 }
6404 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6405 char *pszNewStr = RTStrAlloc(cbFinal);
6406 if (pszNewStr)
6407 {
6408 char *pszTmp = pszNewStr;
6409 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
6410 pszTmp += pszFoundStr - pszWhere;
6411 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
6412 pszTmp += strlen(pszByWhat);
6413 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
6414 }
6415 return pszNewStr;
6416}
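/* Illustrative use (made-up values): vmdkStrReplace("RW 2048 SPARSE \"Test0.vmdk\"",
 * "Test0", "NewName") returns a newly allocated 'RW 2048 SPARSE "NewName.vmdk"';
 * the caller owns the result and must free it with RTStrFree(). */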
6417/** @copydoc VDIMAGEBACKEND::pfnProbe */
6418static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6419 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
6420{
6421 RT_NOREF(enmDesiredType);
6422 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
6423 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
6424 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6425 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6426
6427 int rc = VINF_SUCCESS;
6428 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6429 if (RT_LIKELY(pImage))
6430 {
6431 pImage->pszFilename = pszFilename;
6432 pImage->pFile = NULL;
6433 pImage->pExtents = NULL;
6434 pImage->pFiles = NULL;
6435 pImage->pGTCache = NULL;
6436 pImage->pDescData = NULL;
6437 pImage->pVDIfsDisk = pVDIfsDisk;
6438 pImage->pVDIfsImage = pVDIfsImage;
6439 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6440 * much as possible in vmdkOpenImage. */
6441 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6442 vmdkFreeImage(pImage, false, false /*fFlush*/);
6443 RTMemFree(pImage);
6444 if (RT_SUCCESS(rc))
6445 *penmType = VDTYPE_HDD;
6446 }
6447 else
6448 rc = VERR_NO_MEMORY;
6449 LogFlowFunc(("returns %Rrc\n", rc));
6450 return rc;
6451}
6452/** @copydoc VDIMAGEBACKEND::pfnOpen */
6453static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6454 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6455 VDTYPE enmType, void **ppBackendData)
6456{
6457 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
6458 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
6459 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
6460 int rc;
6461 /* Check open flags. All valid flags are supported. */
6462 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6463 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6464 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6465
6466 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6467 if (RT_LIKELY(pImage))
6468 {
6469 pImage->pszFilename = pszFilename;
6470 pImage->pFile = NULL;
6471 pImage->pExtents = NULL;
6472 pImage->pFiles = NULL;
6473 pImage->pGTCache = NULL;
6474 pImage->pDescData = NULL;
6475 pImage->pVDIfsDisk = pVDIfsDisk;
6476 pImage->pVDIfsImage = pVDIfsImage;
6477 rc = vmdkOpenImage(pImage, uOpenFlags);
6478 if (RT_SUCCESS(rc))
6479 *ppBackendData = pImage;
6480 else
6481 RTMemFree(pImage);
6482 }
6483 else
6484 rc = VERR_NO_MEMORY;
6485 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6486 return rc;
6487}
6488/** @copydoc VDIMAGEBACKEND::pfnCreate */
6489static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
6490 unsigned uImageFlags, const char *pszComment,
6491 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6492 PCRTUUID pUuid, unsigned uOpenFlags,
6493 unsigned uPercentStart, unsigned uPercentSpan,
6494 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6495 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
6496 void **ppBackendData)
6497{
6498 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
6499 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
6500 int rc;
6501 /* Check the VD container type and image flags. */
6502 if ( enmType != VDTYPE_HDD
6503 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
6504 return VERR_VD_INVALID_TYPE;
6505 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
6506 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
6507 && ( !cbSize
6508 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
6509 return VERR_VD_INVALID_SIZE;
6510 /* Check image flags for invalid combinations. */
6511 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6512 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
6513 return VERR_INVALID_PARAMETER;
6514 /* Check open flags. All valid flags are supported. */
6515 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
6516 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6517 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6518 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
6519 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
6520 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6521 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
6522 VERR_INVALID_PARAMETER);
6523 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
6524 if (RT_LIKELY(pImage))
6525 {
6526 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
6527 pImage->pszFilename = pszFilename;
6528 pImage->pFile = NULL;
6529 pImage->pExtents = NULL;
6530 pImage->pFiles = NULL;
6531 pImage->pGTCache = NULL;
6532 pImage->pDescData = NULL;
6533 pImage->pVDIfsDisk = pVDIfsDisk;
6534 pImage->pVDIfsImage = pVDIfsImage;
6535 /* Descriptors for split images can be pretty large, especially if the
6536 * filename is long. So prepare for the worst, and allocate quite some
6537 * memory for the descriptor in this case. */
6538 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6539 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6540 else
6541 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6542 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6543 if (RT_LIKELY(pImage->pDescData))
6544 {
6545 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6546 pPCHSGeometry, pLCHSGeometry, pUuid,
6547 pIfProgress, uPercentStart, uPercentSpan);
6548 if (RT_SUCCESS(rc))
6549 {
6550 /* So far the image is opened in read/write mode. Make sure the
6551 * image is opened in read-only mode if the caller requested that. */
6552 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6553 {
6554 vmdkFreeImage(pImage, false, true /*fFlush*/);
6555 rc = vmdkOpenImage(pImage, uOpenFlags);
6556 }
6557 if (RT_SUCCESS(rc))
6558 *ppBackendData = pImage;
6559 }
6560 if (RT_FAILURE(rc))
6561 RTMemFree(pImage->pDescData);
6562 }
6563 else
6564 rc = VERR_NO_MEMORY;
6565 if (RT_FAILURE(rc))
6566 RTMemFree(pImage);
6567 }
6568 else
6569 rc = VERR_NO_MEMORY;
6570 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6571 return rc;
6572}
6573/**
6574 * Prepares the state for renaming a VMDK image, setting up the state and allocating
6575 * memory.
6576 *
6577 * @returns VBox status code.
6578 * @param pImage VMDK image instance.
6579 * @param pRenameState The state to initialize.
6580 * @param pszFilename The new filename.
6581 */
6582static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6583{
6584 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
6585 int rc = VINF_SUCCESS;
6586 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
6587 /*
6588 * Allocate an array to store both old and new names of renamed files
6589 * in case we have to roll back the changes. Arrays are initialized
6590 * with zeros. We actually save stuff when and if we change it.
6591 */
6592 pRenameState->cExtents = pImage->cExtents;
6593 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6594 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
6595 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
6596 if ( pRenameState->apszOldName
6597 && pRenameState->apszNewName
6598 && pRenameState->apszNewLines)
6599 {
6600 /* Save the descriptor size and position. */
6601 if (pImage->pDescData)
6602 {
6603 /* Separate descriptor file. */
6604 pRenameState->fEmbeddedDesc = false;
6605 }
6606 else
6607 {
6608 /* Embedded descriptor file. */
6609 pRenameState->ExtentCopy = pImage->pExtents[0];
6610 pRenameState->fEmbeddedDesc = true;
6611 }
6612 /* Save the descriptor content. */
6613 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
6614 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6615 {
6616 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
6617 if (!pRenameState->DescriptorCopy.aLines[i])
6618 {
6619 rc = VERR_NO_MEMORY;
6620 break;
6621 }
6622 }
6623 if (RT_SUCCESS(rc))
6624 {
6625 /* Prepare both old and new base names used for string replacement. */
6626 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
6627 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
6628 RTPathStripSuffix(pRenameState->pszNewBaseName);
6629 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
6630 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
6631 RTPathStripSuffix(pRenameState->pszOldBaseName);
6632 /* Prepare both old and new full names used for string replacement.
6633 Note! Must abspath the stuff here, so the strstr weirdness later in
6634 the renaming process gets a match against abspath'ed extent paths.
6635 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
6636 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
6637 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
6638 RTPathStripSuffix(pRenameState->pszNewFullName);
6639 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
6640 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
6641 RTPathStripSuffix(pRenameState->pszOldFullName);
6642 /* Save the old name for easy access to the old descriptor file. */
6643 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
6644 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
6645 /* Save old image name. */
6646 pRenameState->pszOldImageName = pImage->pszFilename;
6647 }
6648 }
6649 else
6650 rc = VERR_NO_TMP_MEMORY;
6651 return rc;
6652}
6653/**
6654 * Destroys the given rename state, freeing all allocated memory.
6655 *
6656 * @returns nothing.
6657 * @param pRenameState The rename state to destroy.
6658 */
6659static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
6660{
6661 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
6662 if (pRenameState->DescriptorCopy.aLines[i])
6663 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
6664 if (pRenameState->apszOldName)
6665 {
6666 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6667 if (pRenameState->apszOldName[i])
6668 RTStrFree(pRenameState->apszOldName[i]);
6669 RTMemTmpFree(pRenameState->apszOldName);
6670 }
6671 if (pRenameState->apszNewName)
6672 {
6673 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6674 if (pRenameState->apszNewName[i])
6675 RTStrFree(pRenameState->apszNewName[i]);
6676 RTMemTmpFree(pRenameState->apszNewName);
6677 }
6678 if (pRenameState->apszNewLines)
6679 {
6680 for (unsigned i = 0; i < pRenameState->cExtents; i++)
6681 if (pRenameState->apszNewLines[i])
6682 RTStrFree(pRenameState->apszNewLines[i]);
6683 RTMemTmpFree(pRenameState->apszNewLines);
6684 }
6685 if (pRenameState->pszOldDescName)
6686 RTStrFree(pRenameState->pszOldDescName);
6687 if (pRenameState->pszOldBaseName)
6688 RTStrFree(pRenameState->pszOldBaseName);
6689 if (pRenameState->pszNewBaseName)
6690 RTStrFree(pRenameState->pszNewBaseName);
6691 if (pRenameState->pszOldFullName)
6692 RTStrFree(pRenameState->pszOldFullName);
6693 if (pRenameState->pszNewFullName)
6694 RTStrFree(pRenameState->pszNewFullName);
6695}
6696/**
6697 * Rolls back the rename operation to the original state.
6698 *
6699 * @returns VBox status code.
6700 * @param pImage VMDK image instance.
6701 * @param pRenameState The rename state.
6702 */
6703static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
6704{
6705 int rc = VINF_SUCCESS;
6706 if (!pRenameState->fImageFreed)
6707 {
6708 /*
6709 * Some extents may have been closed, close the rest. We will
6710 * re-open the whole thing later.
6711 */
6712 vmdkFreeImage(pImage, false, true /*fFlush*/);
6713 }
6714 /* Rename files back. */
6715 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
6716 {
6717 if (pRenameState->apszOldName[i])
6718 {
6719 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
6720 AssertRC(rc);
6721 }
6722 }
6723 /* Restore the old descriptor. */
6724 PVMDKFILE pFile;
6725 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
6726 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
6727 false /* fCreate */));
6728 AssertRC(rc);
6729 if (pRenameState->fEmbeddedDesc)
6730 {
6731 pRenameState->ExtentCopy.pFile = pFile;
6732 pImage->pExtents = &pRenameState->ExtentCopy;
6733 }
6734 else
6735 {
6736 /* Shouldn't be NULL for a separate descriptor.
6737 * There will be no access to the actual content.
6738 */
6739 pImage->pDescData = pRenameState->pszOldDescName;
6740 pImage->pFile = pFile;
6741 }
6742 pImage->Descriptor = pRenameState->DescriptorCopy;
6743 vmdkWriteDescriptor(pImage, NULL);
6744 vmdkFileClose(pImage, &pFile, false);
6745 /* Get rid of the stuff we implanted. */
6746 pImage->pExtents = NULL;
6747 pImage->pFile = NULL;
6748 pImage->pDescData = NULL;
6749 /* Re-open the image back. */
6750 pImage->pszFilename = pRenameState->pszOldImageName;
6751 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6752 return rc;
6753}
6754/**
6755 * Rename worker doing the real work.
6756 *
6757 * @returns VBox status code.
6758 * @param pImage VMDK image instance.
6759 * @param pRenameState The rename state.
6760 * @param pszFilename The new filename.
6761 */
6762static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
6763{
6764 int rc = VINF_SUCCESS;
6765 unsigned i, line;
6766 /* Update the descriptor with modified extent names. */
6767 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6768 i < pRenameState->cExtents;
6769 i++, line = pImage->Descriptor.aNextLines[line])
6770 {
6771 /* Update the descriptor. */
6772 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6773 pRenameState->pszOldBaseName,
6774 pRenameState->pszNewBaseName);
6775 if (!pRenameState->apszNewLines[i])
6776 {
6777 rc = VERR_NO_MEMORY;
6778 break;
6779 }
6780 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
6781 }
6782 if (RT_SUCCESS(rc))
6783 {
6784 /* Make sure the descriptor gets written back. */
6785 pImage->Descriptor.fDirty = true;
6786 /* Flush the descriptor now, in case it is embedded. */
6787 vmdkFlushImage(pImage, NULL);
6788 /* Close and rename/move extents. */
6789 for (i = 0; i < pRenameState->cExtents; i++)
6790 {
6791 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6792 /* Compose new name for the extent. */
6793 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6794 pRenameState->pszOldFullName,
6795 pRenameState->pszNewFullName);
6796 if (!pRenameState->apszNewName[i])
6797 {
6798 rc = VERR_NO_MEMORY;
6799 break;
6800 }
6801 /* Close the extent file. */
6802 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
6803 if (RT_FAILURE(rc))
6804 break;
6805 /* Rename the extent file. */
6806 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
6807 if (RT_FAILURE(rc))
6808 break;
6809 /* Remember the old name. */
6810 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
6811 }
6812 if (RT_SUCCESS(rc))
6813 {
6814 /* Release all old stuff. */
6815 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
6816 if (RT_SUCCESS(rc))
6817 {
6818 pRenameState->fImageFreed = true;
6819 /* The last elements of the new/old name arrays are intended for
6820 * storing the descriptor's old and new names.
6821 */
6822 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
6823 /* Rename the descriptor file if it's separate. */
6824 if (!pRenameState->fEmbeddedDesc)
6825 {
6826 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
6827 if (RT_SUCCESS(rc))
6828 {
6829 /* Save old name only if we may need to change it back. */
6830 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
6831 }
6832 }
6833 /* Update pImage with the new information. */
6834 pImage->pszFilename = pszFilename;
6835 /* Open the new image. */
6836 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6837 }
6838 }
6839 }
6840 return rc;
6841}
6842/** @copydoc VDIMAGEBACKEND::pfnRename */
6843static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
6844{
6845 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6846 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6847 VMDKRENAMESTATE RenameState;
6848 memset(&RenameState, 0, sizeof(RenameState));
6849 /* Check arguments. */
6850 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6851 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
6852 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
6853 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
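    /* The rename is performed in three phases: vmdkRenameStatePrepare()
     * snapshots the descriptor and sets up the old/new name arrays,
     * vmdkRenameWorker() rewrites the descriptor and moves the files, and
     * vmdkRenameRollback() undoes everything if the worker fails. */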
6854 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
6855 if (RT_SUCCESS(rc))
6856 {
6857 /* --- Up to this point we have not done any damage yet. --- */
6858 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
6859 /* Roll back all changes in case of failure. */
6860 if (RT_FAILURE(rc))
6861 {
6862 int rrc = vmdkRenameRollback(pImage, &RenameState);
6863 AssertRC(rrc);
6864 }
6865 }
6866 vmdkRenameStateDestroy(&RenameState);
6867 LogFlowFunc(("returns %Rrc\n", rc));
6868 return rc;
6869}
6870/** @copydoc VDIMAGEBACKEND::pfnClose */
6871static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
6872{
6873 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6874 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6875 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
6876 RTMemFree(pImage);
6877 LogFlowFunc(("returns %Rrc\n", rc));
6878 return rc;
6879}
6880/** @copydoc VDIMAGEBACKEND::pfnRead */
6881static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
6882 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
6883{
6884 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
6885 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
6886 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6887 AssertPtr(pImage);
6888 Assert(uOffset % 512 == 0);
6889 Assert(cbToRead % 512 == 0);
6890 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
6891 AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
6892 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
6893 /* Find the extent and check access permissions as defined in the extent descriptor. */
6894 PVMDKEXTENT pExtent;
6895 uint64_t uSectorExtentRel;
6896 int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
6897 &pExtent, &uSectorExtentRel);
6898 if ( RT_SUCCESS(rc)
6899 && pExtent->enmAccess != VMDKACCESS_NOACCESS)
6900 {
6901 /* Clip read range to remain in this extent. */
6902 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6903 /* Handle the read according to the current extent type. */
6904 switch (pExtent->enmType)
6905 {
6906 case VMDKETYPE_HOSTED_SPARSE:
6907 {
6908 uint64_t uSectorExtentAbs;
6909 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
6910 if (RT_FAILURE(rc))
6911 break;
6912 /* Clip read range to at most the rest of the grain. */
6913 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
6914 Assert(!(cbToRead % 512));
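    /* An absolute sector of 0 means the grain was never allocated: report
     * the block as free, except for sequentially read streamOptimized
     * images, where the data (if present) comes from the compressed stream. */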
6915 if (uSectorExtentAbs == 0)
6916 {
6917 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6918 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6919 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
6920 rc = VERR_VD_BLOCK_FREE;
6921 else
6922 rc = vmdkStreamReadSequential(pImage, pExtent,
6923 uSectorExtentRel,
6924 pIoCtx, cbToRead);
6925 }
6926 else
6927 {
6928 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6929 {
6930 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6931 ("Async I/O is not supported for stream optimized VMDK's\n"));
6932 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
6933 uSectorExtentAbs -= uSectorInGrain;
6934 if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
6935 {
6936 uint64_t uLBA = 0; /* gcc maybe uninitialized */
6937 rc = vmdkFileInflateSync(pImage, pExtent,
6938 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6939 pExtent->pvGrain,
6940 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6941 NULL, &uLBA, NULL);
6942 if (RT_FAILURE(rc))
6943 {
6944 pExtent->uGrainSectorAbs = 0;
6945 break;
6946 }
6947 pExtent->uGrainSectorAbs = uSectorExtentAbs;
6948 pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
6949 Assert(uLBA == uSectorExtentRel);
6950 }
6951 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6952 (uint8_t *)pExtent->pvGrain
6953 + VMDK_SECTOR2BYTE(uSectorInGrain),
6954 cbToRead);
6955 }
6956 else
6957 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6958 VMDK_SECTOR2BYTE(uSectorExtentAbs),
6959 pIoCtx, cbToRead);
6960 }
6961 break;
6962 }
6963 case VMDKETYPE_VMFS:
6964 case VMDKETYPE_FLAT:
6965 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
6966 VMDK_SECTOR2BYTE(uSectorExtentRel),
6967 pIoCtx, cbToRead);
6968 break;
6969 case VMDKETYPE_ZERO:
6970 {
6971 size_t cbSet;
6972 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
6973 Assert(cbSet == cbToRead);
6974 break;
6975 }
6976 }
6977 if (pcbActuallyRead)
6978 *pcbActuallyRead = cbToRead;
6979 }
6980 else if (RT_SUCCESS(rc))
6981 rc = VERR_VD_VMDK_INVALID_STATE;
6982 LogFlowFunc(("returns %Rrc\n", rc));
6983 return rc;
6984}
6985/** @copydoc VDIMAGEBACKEND::pfnWrite */
6986static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
6987 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
6988 size_t *pcbPostRead, unsigned fWrite)
6989{
6990 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
6991 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
6992 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6993 int rc;
6994 AssertPtr(pImage);
6995 Assert(uOffset % 512 == 0);
6996 Assert(cbToWrite % 512 == 0);
6997 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
6998 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
6999 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7000 {
7001 PVMDKEXTENT pExtent;
7002 uint64_t uSectorExtentRel;
7003 uint64_t uSectorExtentAbs;
7004 /* No size check here, will do that later when the extent is located.
7005 * There are sparse images out there which according to the spec are
7006 * invalid, because the total size is not a multiple of the grain size.
7007 * Also for sparse images which are stitched together in odd ways (not at
7008 * grain boundaries, and with the nominal size not being a multiple of the
7009 * grain size), this would prevent writing to the last grain. */
7010 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7011 &pExtent, &uSectorExtentRel);
7012 if (RT_SUCCESS(rc))
7013 {
7014 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
7015 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7016 && !pImage->pExtents[0].uAppendPosition
7017 && pExtent->enmAccess != VMDKACCESS_READONLY))
7018 rc = VERR_VD_VMDK_INVALID_STATE;
7019 else
7020 {
7021 /* Handle the write according to the current extent type. */
7022 switch (pExtent->enmType)
7023 {
7024 case VMDKETYPE_HOSTED_SPARSE:
7025 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7026 if (RT_SUCCESS(rc))
7027 {
7028 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7029 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
7030 rc = VERR_VD_VMDK_INVALID_WRITE;
7031 else
7032 {
7033 /* Clip write range to at most the rest of the grain. */
7034 cbToWrite = RT_MIN(cbToWrite,
7035 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
7036 - uSectorExtentRel % pExtent->cSectorsPerGrain));
7037 if (uSectorExtentAbs == 0)
7038 {
7039 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7040 {
7041 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7042 {
7043 /* Full block write to a previously unallocated block.
7044 * Check if the caller wants to avoid the automatic alloc. */
7045 if (!(fWrite & VD_WRITE_NO_ALLOC))
7046 {
7047 /* Allocate GT and find out where to store the grain. */
7048 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
7049 uSectorExtentRel, cbToWrite);
7050 }
7051 else
7052 rc = VERR_VD_BLOCK_FREE;
7053 *pcbPreRead = 0;
7054 *pcbPostRead = 0;
7055 }
7056 else
7057 {
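    /* Partial write to an unallocated grain: report the block as free and
     * tell the caller how much data has to be read before and after the
     * written range, so it can fall back to a full-grain read-modify-write. */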
7058 /* Clip write range to remain in this extent. */
7059 cbToWrite = RT_MIN(cbToWrite,
7060 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
7061 + pExtent->cNominalSectors - uSectorExtentRel));
7062 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7063 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
7064 rc = VERR_VD_BLOCK_FREE;
7065 }
7066 }
7067 else
7068 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
7069 pIoCtx, cbToWrite);
7070 }
7071 else
7072 {
7073 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7074 {
7075 /* A partial write to a streamOptimized image is simply
7076 * invalid. It requires rewriting already compressed data
7077 * which is somewhere between expensive and impossible. */
7078 rc = VERR_VD_VMDK_INVALID_STATE;
7079 pExtent->uGrainSectorAbs = 0;
7080 AssertRC(rc);
7081 }
7082 else
7083 {
7084 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7085 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7086 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7087 pIoCtx, cbToWrite, NULL, NULL);
7088 }
7089 }
7090 }
7091 }
7092 break;
7093 case VMDKETYPE_VMFS:
7094 case VMDKETYPE_FLAT:
7095 /* Clip write range to remain in this extent. */
7096 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7097 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7098 VMDK_SECTOR2BYTE(uSectorExtentRel),
7099 pIoCtx, cbToWrite, NULL, NULL);
7100 break;
7101 case VMDKETYPE_ZERO:
7102 /* Clip write range to remain in this extent. */
7103 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7104 break;
7105 }
7106 }
7107 if (pcbWriteProcess)
7108 *pcbWriteProcess = cbToWrite;
7109 }
7110 }
7111 else
7112 rc = VERR_VD_IMAGE_READ_ONLY;
7113 LogFlowFunc(("returns %Rrc\n", rc));
7114 return rc;
7115}
7116/** @copydoc VDIMAGEBACKEND::pfnFlush */
7117static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7118{
7119 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7120 return vmdkFlushImage(pImage, pIoCtx);
7121}
7122/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7123static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7124{
7125 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7126 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7127 AssertPtrReturn(pImage, 0);
7128 return VMDK_IMAGE_VERSION;
7129}
7130/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7131static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7132{
7133 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7134 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7135 uint64_t cb = 0;
7136 AssertPtrReturn(pImage, 0);
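    /* Report the sum of the base/descriptor file and all extent data files. */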
7137 if (pImage->pFile != NULL)
7138 {
7139 uint64_t cbFile;
7140 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7141 if (RT_SUCCESS(rc))
7142 cb += cbFile;
7143 }
7144 for (unsigned i = 0; i < pImage->cExtents; i++)
7145 {
7146 if (pImage->pExtents[i].pFile != NULL)
7147 {
7148 uint64_t cbFile;
7149 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7150 if (RT_SUCCESS(rc))
7151 cb += cbFile;
7152 }
7153 }
7154 LogFlowFunc(("returns %lld\n", cb));
7155 return cb;
7156}
7157/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7158static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7159{
7160 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7161 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7162 int rc = VINF_SUCCESS;
7163 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7164 if (pImage->PCHSGeometry.cCylinders)
7165 *pPCHSGeometry = pImage->PCHSGeometry;
7166 else
7167 rc = VERR_VD_GEOMETRY_NOT_SET;
7168 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7169 return rc;
7170}
7171/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7172static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7173{
7174 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7175 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7176 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7177 int rc = VINF_SUCCESS;
7178 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7179 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7180 {
7181        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7182 {
7183 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7184 if (RT_SUCCESS(rc))
7185 pImage->PCHSGeometry = *pPCHSGeometry;
7186 }
7187 else
7188 rc = VERR_NOT_SUPPORTED;
7189 }
7190 else
7191 rc = VERR_VD_IMAGE_READ_ONLY;
7192 LogFlowFunc(("returns %Rrc\n", rc));
7193 return rc;
7194}
7195/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7196static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7197{
7198 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7199 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7200 int rc = VINF_SUCCESS;
7201 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7202 if (pImage->LCHSGeometry.cCylinders)
7203 *pLCHSGeometry = pImage->LCHSGeometry;
7204 else
7205 rc = VERR_VD_GEOMETRY_NOT_SET;
7206 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7207 return rc;
7208}
7209/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7210static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7211{
7212 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7213 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7214 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7215 int rc = VINF_SUCCESS;
7216 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7217 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7218 {
7219        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7220 {
7221 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7222 if (RT_SUCCESS(rc))
7223 pImage->LCHSGeometry = *pLCHSGeometry;
7224 }
7225 else
7226 rc = VERR_NOT_SUPPORTED;
7227 }
7228 else
7229 rc = VERR_VD_IMAGE_READ_ONLY;
7230 LogFlowFunc(("returns %Rrc\n", rc));
7231 return rc;
7232}
7233/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7234static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7235{
7236 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7237 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7238 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
7239 *ppRegionList = &pThis->RegionList;
7240 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7241 return VINF_SUCCESS;
7242}
7243/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7244static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7245{
7246 RT_NOREF1(pRegionList);
7247 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7248 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7249 AssertPtr(pThis); RT_NOREF(pThis);
7250 /* Nothing to do here. */
7251}
7252/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7253static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7254{
7255 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7256 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7257 AssertPtrReturn(pImage, 0);
7258 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7259 return pImage->uImageFlags;
7260}
7261/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7262static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7263{
7264 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7265 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7266 AssertPtrReturn(pImage, 0);
7267 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7268 return pImage->uOpenFlags;
7269}
7270/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7271static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7272{
7273 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7274 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7275 int rc;
7276 /* Image must be opened and the new flags must be valid. */
7277 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7278 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7279 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7280 rc = VERR_INVALID_PARAMETER;
7281 else
7282 {
7283 /* StreamOptimized images need special treatment: reopen is prohibited. */
7284 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7285 {
7286 if (pImage->uOpenFlags == uOpenFlags)
7287 rc = VINF_SUCCESS;
7288 else
7289 rc = VERR_INVALID_PARAMETER;
7290 }
7291 else
7292 {
7293 /* Implement this operation via reopening the image. */
7294 vmdkFreeImage(pImage, false, true /*fFlush*/);
7295 rc = vmdkOpenImage(pImage, uOpenFlags);
7296 }
7297 }
7298 LogFlowFunc(("returns %Rrc\n", rc));
7299 return rc;
7300}
7301/** @copydoc VDIMAGEBACKEND::pfnGetComment */
7302static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
7303{
7304 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
7305 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7306 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7307 char *pszCommentEncoded = NULL;
7308 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
7309 "ddb.comment", &pszCommentEncoded);
7310 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
7311 {
7312 pszCommentEncoded = NULL;
7313 rc = VINF_SUCCESS;
7314 }
7315 if (RT_SUCCESS(rc))
7316 {
7317 if (pszComment && pszCommentEncoded)
7318 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
7319 else if (pszComment)
7320 *pszComment = '\0';
7321 if (pszCommentEncoded)
7322 RTMemTmpFree(pszCommentEncoded);
7323 }
7324 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
7325 return rc;
7326}
7327/** @copydoc VDIMAGEBACKEND::pfnSetComment */
7328static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
7329{
7330 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7331 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7332 int rc;
7333 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7334 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7335 {
7336        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7337 rc = vmdkSetImageComment(pImage, pszComment);
7338 else
7339 rc = VERR_NOT_SUPPORTED;
7340 }
7341 else
7342 rc = VERR_VD_IMAGE_READ_ONLY;
7343 LogFlowFunc(("returns %Rrc\n", rc));
7344 return rc;
7345}
7346/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
7347static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7348{
7349 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7350 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7351 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7352 *pUuid = pImage->ImageUuid;
7353 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7354 return VINF_SUCCESS;
7355}
7356/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
7357static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7358{
7359 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7360 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7361 int rc = VINF_SUCCESS;
7362 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7363 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7364 {
7365        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7366 {
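            /* Mirror the new UUID in the in-memory state and persist it in the
             * descriptor's disk data base (the ddb.uuid.image entry). */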
7367 pImage->ImageUuid = *pUuid;
7368 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7369 VMDK_DDB_IMAGE_UUID, pUuid);
7370 if (RT_FAILURE(rc))
7371 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7372 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7373 }
7374 else
7375 rc = VERR_NOT_SUPPORTED;
7376 }
7377 else
7378 rc = VERR_VD_IMAGE_READ_ONLY;
7379 LogFlowFunc(("returns %Rrc\n", rc));
7380 return rc;
7381}
7382/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
7383static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7384{
7385 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7386 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7387 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7388 *pUuid = pImage->ModificationUuid;
7389 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7390 return VINF_SUCCESS;
7391}
7392/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
7393static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7394{
7395 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7396 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7397 int rc = VINF_SUCCESS;
7398 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7399 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7400 {
7401        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7402 {
7403 /* Only touch the modification uuid if it changed. */
7404 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7405 {
7406 pImage->ModificationUuid = *pUuid;
7407 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7408 VMDK_DDB_MODIFICATION_UUID, pUuid);
7409 if (RT_FAILURE(rc))
7410 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7411 }
7412 }
7413 else
7414 rc = VERR_NOT_SUPPORTED;
7415 }
7416 else
7417 rc = VERR_VD_IMAGE_READ_ONLY;
7418 LogFlowFunc(("returns %Rrc\n", rc));
7419 return rc;
7420}
7421/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
7422static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7423{
7424 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7425 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7426 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7427 *pUuid = pImage->ParentUuid;
7428 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7429 return VINF_SUCCESS;
7430}
7431/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
7432static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7433{
7434 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7435 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7436 int rc = VINF_SUCCESS;
7437 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7438 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7439 {
7440        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7441 {
7442 pImage->ParentUuid = *pUuid;
7443 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7444 VMDK_DDB_PARENT_UUID, pUuid);
7445 if (RT_FAILURE(rc))
7446 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
7447 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7448 }
7449 else
7450 rc = VERR_NOT_SUPPORTED;
7451 }
7452 else
7453 rc = VERR_VD_IMAGE_READ_ONLY;
7454 LogFlowFunc(("returns %Rrc\n", rc));
7455 return rc;
7456}
7457/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
7458static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7459{
7460 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7461 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7462 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7463 *pUuid = pImage->ParentModificationUuid;
7464 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
7465 return VINF_SUCCESS;
7466}
7467/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
7468static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7469{
7470 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7471 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7472 int rc = VINF_SUCCESS;
7473 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7474 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7475 {
7476        if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7477 {
7478 pImage->ParentModificationUuid = *pUuid;
7479 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7480 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7481 if (RT_FAILURE(rc))
7482                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
7483 }
7484 else
7485 rc = VERR_NOT_SUPPORTED;
7486 }
7487 else
7488 rc = VERR_VD_IMAGE_READ_ONLY;
7489 LogFlowFunc(("returns %Rrc\n", rc));
7490 return rc;
7491}
7492/** @copydoc VDIMAGEBACKEND::pfnDump */
7493static DECLCALLBACK(void) vmdkDump(void *pBackendData)
7494{
7495 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7496 AssertPtrReturnVoid(pImage);
7497 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7498 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7499 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7500 VMDK_BYTE2SECTOR(pImage->cbSize));
7501 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7502 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7503 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7504 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7505}
7506
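/**
 * Internal: replace the sector count in one extent line of the descriptor.
 *
 * A descriptor extent line has the form 'ACCESS SECTORS TYPE "FILE" [OFFSET]',
 * for example 'RW 4192256 FLAT "disk-flat.vmdk" 0' (the file name here is just
 * an illustration). This helper rewrites the given line so that the old sector
 * count is replaced by the new one.
 *
 * @returns VBox status code.
 * @param   pImage       Image instance data.
 * @param   line         Index of the descriptor line to patch.
 * @param   cSectorsOld  Sector count currently present in the line.
 * @param   cSectorsNew  Sector count to put into the line.
 */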
7507static int vmdkRepaceExtentSize(PVMDKIMAGE pImage, unsigned line, uint64_t cSectorsOld,
7508 uint64_t cSectorsNew)
7509{
7510 char * szOldExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
7511 if (!szOldExtentSectors)
7512 return VERR_NO_MEMORY;
7513
7514 int cbWritten = RTStrPrintf2(szOldExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsOld);
7515 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
7516 {
7517 RTMemFree(szOldExtentSectors);
7518 szOldExtentSectors = NULL;
7519
7520 return VERR_BUFFER_OVERFLOW;
7521 }
7522
7523 char * szNewExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
7524    if (!szNewExtentSectors)
7525    {
            RTMemFree(szOldExtentSectors); /* don't leak the old-size string */
            return VERR_NO_MEMORY;
        }
7526
7527 cbWritten = RTStrPrintf2(szNewExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsNew);
7528 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
7529 {
7530 RTMemFree(szOldExtentSectors);
7531 szOldExtentSectors = NULL;
7532
7533 RTMemFree(szNewExtentSectors);
7534 szNewExtentSectors = NULL;
7535
7536 return VERR_BUFFER_OVERFLOW;
7537 }
7538
7539 char * szNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[line],
7540 szOldExtentSectors,
7541 szNewExtentSectors);
7542
7543 RTMemFree(szOldExtentSectors);
7544 szOldExtentSectors = NULL;
7545
7546 RTMemFree(szNewExtentSectors);
7547 szNewExtentSectors = NULL;
7548
7549 if (!szNewExtentLine)
7550 return VERR_INVALID_PARAMETER;
7551
7552 pImage->Descriptor.aLines[line] = szNewExtentLine;
7553
7554 return VINF_SUCCESS;
7555}
7556
7557/** @copydoc VDIMAGEBACKEND::pfnResize */
7558static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
7559 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7560 unsigned uPercentStart, unsigned uPercentSpan,
7561 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7562 PVDINTERFACE pVDIfsOperation)
7563{
7564 RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
7565
7566 // Establish variables and objects needed
7567 int rc = VINF_SUCCESS;
7568 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7569 unsigned uImageFlags = pImage->uImageFlags;
7570 PVMDKEXTENT pExtent = &pImage->pExtents[0];
7571
7572 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
7573 if (cbSize % VMDK_SECTOR_SIZE)
7574 cSectorsNew++;
7575
7576 uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
7577 if (pImage->cbSize % VMDK_SECTOR_SIZE)
7578 cSectorsOld++;
7579 unsigned cExtents = pImage->cExtents;
7580
7581 /* Check size is within min/max bounds. */
7582 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7583 && ( !cbSize
7584 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
7585 return VERR_VD_INVALID_SIZE;
7586
7587 /*
7588 * Making the image smaller is not supported at the moment.
7589 */
7590 /** @todo implement making the image smaller, it is the responsibility of
7591 * the user to know what he's doing. */
7592 if (cbSize < pImage->cbSize)
7593 rc = VERR_VD_SHRINK_NOT_SUPPORTED;
7594 else if (cbSize > pImage->cbSize)
7595 {
7596 /**
7597 * monolithicFlat. FIXED flag and not split up into 2 GB parts.
7598 */
7599 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
7600 {
7601 /** Required space in bytes for the extent after the resize. */
7602 uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
7603 pExtent = &pImage->pExtents[0];
7604
7605 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
7606 0 /* fFlags */, NULL,
7607 uPercentStart, uPercentSpan);
7608 if (RT_FAILURE(rc))
7609 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7610
7611 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
7612 if (RT_FAILURE(rc))
7613 return rc;
7614 }
7615
7616 /**
7617 * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
7618 */
7619 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
7620 {
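            /* Growth strategy: first use up the room left in the last extent, then
             * append as many full 2 GB extents as needed, and finally create one
             * smaller extent for whatever remains. */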
7621 /* Check to see how much space remains in last extent */
7622 bool fSpaceAvailible = false;
7623 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7624 if (cLastExtentRemSectors)
7625 fSpaceAvailible = true;
7626
7627 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
7628 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
7629 {
7630 pExtent = &pImage->pExtents[cExtents - 1];
7631 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
7632 VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
7633 0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
7634 if (RT_FAILURE(rc))
7635 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7636
7637 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
7638 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
7639 if (RT_FAILURE(rc))
7640 return rc;
7641 }
7642 else
7643 {
7644 if (fSpaceAvailible)
7645 {
7646 pExtent = &pImage->pExtents[cExtents - 1];
7647 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
7648 0 /* fFlags */, NULL,
7649 uPercentStart, uPercentSpan);
7650 if (RT_FAILURE(rc))
7651 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
7652
7653 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
7654
7655 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
7656 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
7657 if (RT_FAILURE(rc))
7658 return rc;
7659 }
7660
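                /* Append full 2 GB extents for the bulk of the remaining space; the
                 * smaller tail extent for any leftover sectors is created below. */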
7661 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
7662                if (VMDK_SECTOR2BYTE(cSectorsNeeded) % VMDK_2G_SPLIT_SIZE)
7663 cNewExtents++;
7664
7665 for (unsigned i = cExtents;
7666 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7667 i++)
7668 {
7669 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
7670 if (RT_FAILURE(rc))
7671 return rc;
7672
7673 pExtent = &pImage->pExtents[i];
7674
7675 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7676 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
7677 }
7678
7679 if (cSectorsNeeded)
7680 {
7681 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
7682 if (RT_FAILURE(rc))
7683 return rc;
7684 }
7685 }
7686 }
7687
7688 /* Successful resize. Update metadata */
7689 if (RT_SUCCESS(rc))
7690 {
7691 /* Update size and new block count. */
7692 pImage->cbSize = cbSize;
7693 /** @todo r=jack: update cExtents if needed */
7694 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
7695
7696 /* Update geometry. */
7697 pImage->PCHSGeometry = *pPCHSGeometry;
7698 pImage->LCHSGeometry = *pLCHSGeometry;
7699 }
7700
7701 /* Update header information in base image file. */
7702 rc = vmdkWriteDescriptor(pImage, NULL);
7703
7704 if (RT_FAILURE(rc))
7705 return rc;
7706
7707 rc = vmdkFlushImage(pImage, NULL);
7708
7709 if (RT_FAILURE(rc))
7710 return rc;
7711 }
7712 /* Same size doesn't change the image at all. */
7713
7714 LogFlowFunc(("returns %Rrc\n", rc));
7715 return rc;
7716}
7717
7718const VDIMAGEBACKEND g_VmdkBackend =
7719{
7720 /* u32Version */
7721 VD_IMGBACKEND_VERSION,
7722 /* pszBackendName */
7723 "VMDK",
7724 /* uBackendCaps */
7725 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
7726 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
7727 | VD_CAP_VFS | VD_CAP_PREFERRED,
7728 /* paFileExtensions */
7729 s_aVmdkFileExtensions,
7730 /* paConfigInfo */
7731 s_aVmdkConfigInfo,
7732 /* pfnProbe */
7733 vmdkProbe,
7734 /* pfnOpen */
7735 vmdkOpen,
7736 /* pfnCreate */
7737 vmdkCreate,
7738 /* pfnRename */
7739 vmdkRename,
7740 /* pfnClose */
7741 vmdkClose,
7742 /* pfnRead */
7743 vmdkRead,
7744 /* pfnWrite */
7745 vmdkWrite,
7746 /* pfnFlush */
7747 vmdkFlush,
7748 /* pfnDiscard */
7749 NULL,
7750 /* pfnGetVersion */
7751 vmdkGetVersion,
7752 /* pfnGetFileSize */
7753 vmdkGetFileSize,
7754 /* pfnGetPCHSGeometry */
7755 vmdkGetPCHSGeometry,
7756 /* pfnSetPCHSGeometry */
7757 vmdkSetPCHSGeometry,
7758 /* pfnGetLCHSGeometry */
7759 vmdkGetLCHSGeometry,
7760 /* pfnSetLCHSGeometry */
7761 vmdkSetLCHSGeometry,
7762 /* pfnQueryRegions */
7763 vmdkQueryRegions,
7764 /* pfnRegionListRelease */
7765 vmdkRegionListRelease,
7766 /* pfnGetImageFlags */
7767 vmdkGetImageFlags,
7768 /* pfnGetOpenFlags */
7769 vmdkGetOpenFlags,
7770 /* pfnSetOpenFlags */
7771 vmdkSetOpenFlags,
7772 /* pfnGetComment */
7773 vmdkGetComment,
7774 /* pfnSetComment */
7775 vmdkSetComment,
7776 /* pfnGetUuid */
7777 vmdkGetUuid,
7778 /* pfnSetUuid */
7779 vmdkSetUuid,
7780 /* pfnGetModificationUuid */
7781 vmdkGetModificationUuid,
7782 /* pfnSetModificationUuid */
7783 vmdkSetModificationUuid,
7784 /* pfnGetParentUuid */
7785 vmdkGetParentUuid,
7786 /* pfnSetParentUuid */
7787 vmdkSetParentUuid,
7788 /* pfnGetParentModificationUuid */
7789 vmdkGetParentModificationUuid,
7790 /* pfnSetParentModificationUuid */
7791 vmdkSetParentModificationUuid,
7792 /* pfnDump */
7793 vmdkDump,
7794 /* pfnGetTimestamp */
7795 NULL,
7796 /* pfnGetParentTimestamp */
7797 NULL,
7798 /* pfnSetParentTimestamp */
7799 NULL,
7800 /* pfnGetParentFilename */
7801 NULL,
7802 /* pfnSetParentFilename */
7803 NULL,
7804 /* pfnComposeLocation */
7805 genericFileComposeLocation,
7806 /* pfnComposeName */
7807 genericFileComposeName,
7808 /* pfnCompact */
7809 NULL,
7810 /* pfnResize */
7811 vmdkResize,
7812 /* pfnRepair */
7813 NULL,
7814 /* pfnTraverseMetadata */
7815 NULL,
7816 /* u32VersionEnd */
7817 VD_IMGBACKEND_VERSION
7818};